`,
- `p:first-of-type`,
- []string{
- ``,
- },
- },
- {
- `
`,
- `p:only-child`,
- []string{
- ``,
- },
- },
- {
- `
`,
- `p:only-of-type`,
- []string{
- ``,
- },
- },
- {
- `
Hello
`,
- `:empty`,
- []string{
- ``,
- ``,
- ``,
- },
- },
- {
- ``,
- `div p`,
- []string{
- `
`,
- `
`,
- },
- },
- {
- `
`,
- `div table p`,
- []string{
- `
`,
- },
- },
- {
- `
`,
- `div > p`,
- []string{
- ``,
- `
`,
- },
- },
- {
- `
`,
- `p ~ p`,
- []string{
- `
`,
- `
`,
- },
- },
- {
- `
-
-
`,
- `p + p`,
- []string{
- `
`,
- },
- },
- {
- `
`,
- `li, p`,
- []string{
- "
",
- " ",
- "",
- },
- },
- {
- `
`,
- `p +/*This is a comment*/ p`,
- []string{
- `
`,
- },
- },
- {
- `
Text block that wraps inner text and continues
`,
- `p:contains("that wraps")`,
- []string{
- ``,
- },
- },
- {
- `
Text block that wraps inner text and continues
`,
- `p:containsOwn("that wraps")`,
- []string{},
- },
- {
- `Text block that wraps inner text and continues
`,
- `:containsOwn("inner")`,
- []string{
- ``,
- },
- },
- {
- `Text block that wraps inner text and continues
`,
- `p:containsOwn("block")`,
- []string{
- ``,
- },
- },
- {
- `
`,
- `div:has(#p1)`,
- []string{
- ``,
- },
- },
- {
- `
-
`,
- `div:has(:containsOwn("2"))`,
- []string{
- `
`,
- },
- },
- {
- `
-
`,
- `body :has(:containsOwn("2"))`,
- []string{
- `
`,
- `
`,
- },
- },
- {
- `
-
`,
- `body :haschild(:containsOwn("2"))`,
- []string{
- `
`,
- },
- },
- {
- `
0123456789
abcdef
0123ABCD
`,
- `p:matches([\d])`,
- []string{
- `
`,
- `
`,
- },
- },
- {
- `
0123456789
abcdef
0123ABCD
`,
- `p:matches([a-z])`,
- []string{
- `
`,
- },
- },
- {
- `
0123456789
abcdef
0123ABCD
`,
- `p:matches([a-zA-Z])`,
- []string{
- `
`,
- `
`,
- },
- },
- {
- `
0123456789
abcdef
0123ABCD
`,
- `p:matches([^\d])`,
- []string{
- `
`,
- `
`,
- },
- },
- {
- `
0123456789
abcdef
0123ABCD
`,
- `p:matches(^(0|a))`,
- []string{
- `
`,
- `
`,
- `
`,
- },
- },
- {
- `
0123456789
abcdef
0123ABCD
`,
- `p:matches(^\d+$)`,
- []string{
- `
`,
- },
- },
- {
- `
0123456789
abcdef
0123ABCD
`,
- `p:not(:matches(^\d+$))`,
- []string{
- `
`,
- `
`,
- },
- },
- {
- `
01234567 89
`,
- `div :matchesOwn(^\d+$)`,
- []string{
- `
`,
- ``,
- },
- },
- {
- ``,
- `[href#=(fina)]:not([href#=(\/\/[^\/]+untrusted)])`,
- []string{
- ``,
- ` `,
- },
- },
- {
- ``,
- `[href#=(^https:\/\/[^\/]*\/?news)]`,
- []string{
- ` `,
- },
- },
- {
- ``,
- `:input`,
- []string{
- ` `,
- ` `,
- ``,
- ``,
- ``,
- },
- },
-}
-
-func TestSelectors(t *testing.T) {
- for _, test := range selectorTests {
- s, err := Compile(test.selector)
- if err != nil {
- t.Errorf("error compiling %q: %s", test.selector, err)
- continue
- }
-
- doc, err := html.Parse(strings.NewReader(test.HTML))
- if err != nil {
- t.Errorf("error parsing %q: %s", test.HTML, err)
- continue
- }
-
- matches := s.MatchAll(doc)
- if len(matches) != len(test.results) {
- t.Errorf("wanted %d elements, got %d instead", len(test.results), len(matches))
- continue
- }
-
- for i, m := range matches {
- got := nodeString(m)
- if got != test.results[i] {
- t.Errorf("wanted %s, got %s instead", test.results[i], got)
- }
- }
-
- firstMatch := s.MatchFirst(doc)
- if len(test.results) == 0 {
- if firstMatch != nil {
- t.Errorf("MatchFirst: want nil, got %s", nodeString(firstMatch))
- }
- } else {
- got := nodeString(firstMatch)
- if got != test.results[0] {
- t.Errorf("MatchFirst: want %s, got %s", test.results[0], got)
- }
- }
- }
-}
diff --git a/vendor/src/github.com/andygrunwald/go-jira/LICENSE b/vendor/src/github.com/andygrunwald/go-jira/LICENSE
deleted file mode 100644
index 692f6be..0000000
--- a/vendor/src/github.com/andygrunwald/go-jira/LICENSE
+++ /dev/null
@@ -1,22 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2015 Andy Grunwald
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
diff --git a/vendor/src/github.com/andygrunwald/go-jira/README.md b/vendor/src/github.com/andygrunwald/go-jira/README.md
deleted file mode 100644
index f9a92c9..0000000
--- a/vendor/src/github.com/andygrunwald/go-jira/README.md
+++ /dev/null
@@ -1,172 +0,0 @@
-# go-jira
-
-[![GoDoc](https://godoc.org/github.com/andygrunwald/go-jira?status.svg)](https://godoc.org/github.com/andygrunwald/go-jira)
-[![Build Status](https://travis-ci.org/andygrunwald/go-jira.svg?branch=master)](https://travis-ci.org/andygrunwald/go-jira)
-[![Go Report Card](https://goreportcard.com/badge/github.com/andygrunwald/go-jira)](https://goreportcard.com/report/github.com/andygrunwald/go-jira)
-[![Coverage Status](https://coveralls.io/repos/github/andygrunwald/go-jira/badge.svg?branch=master)](https://coveralls.io/github/andygrunwald/go-jira?branch=master)
-
-[Go](https://golang.org/) client library for [Atlassian JIRA](https://www.atlassian.com/software/jira).
-
-![Go client library for Atlassian JIRA](./img/go-jira-compressed.png "Go client library for Atlassian JIRA.")
-
-## Features
-
-* Authentication (HTTP Basic, OAuth, Session Cookie)
-* Create and receive issues
-* Create and retrieve issue transitions (status updates)
-* Call every API endpoint of the JIRA, even it is not directly implemented in this library
-
-This package is not JIRA API complete (yet), but you can call every API endpoint you want. See [Call a not implemented API endpoint](#call-a-not-implemented-api-endpoint) how to do this. For all possible API endpoints of JRIA have a look at [latest JIRA REST API documentation](https://docs.atlassian.com/jira/REST/latest/).
-
-## Compatible JIRA versions
-
-This package was tested against JIRA v6.3.4 and v7.1.2.
-
-## Installation
-
-It is go gettable
-
- $ go get github.com/andygrunwald/go-jira
-
-(optional) to run unit / example tests:
-
- $ cd $GOPATH/src/github.com/andygrunwald/go-jira
- $ go test -v ./...
-
-## API
-
-Please have a look at the [GoDoc documentation](https://godoc.org/github.com/andygrunwald/go-jira) for a detailed API description.
-
-The [latest JIRA REST API documentation](https://docs.atlassian.com/jira/REST/latest/) was the base document for this package.
-
-## Examples
-
-Further a few examples how the API can be used.
-A few more examples are available in the [GoDoc examples section](https://godoc.org/github.com/andygrunwald/go-jira#pkg-examples).
-
-### Get a single issue
-
-Lets retrieve [MESOS-3325](https://issues.apache.org/jira/browse/MESOS-3325) from the [Apache Mesos](http://mesos.apache.org/) project.
-
-```go
-package main
-
-import (
- "fmt"
- "github.com/andygrunwald/go-jira"
-)
-
-func main() {
- jiraClient, _ := jira.NewClient(nil, "https://issues.apache.org/jira/")
- issue, _, _ := jiraClient.Issue.Get("MESOS-3325")
-
- fmt.Printf("%s: %+v\n", issue.Key, issue.Fields.Summary)
- fmt.Printf("Type: %s\n", issue.Fields.Type.Name)
- fmt.Printf("Priority: %s\n", issue.Fields.Priority.Name)
-
- // MESOS-3325: Running mesos-slave@0.23 in a container causes slave to be lost after a restart
- // Type: Bug
- // Priority: Critical
-}
-```
-
-### Authenticate with session cookie
-
-Some actions require an authenticated user.
-Here is an example with a session cookie authentification.
-
-```go
-package main
-
-import (
- "fmt"
- "github.com/andygrunwald/go-jira"
-)
-
-func main() {
- jiraClient, err := jira.NewClient(nil, "https://your.jira-instance.com/")
- if err != nil {
- panic(err)
- }
-
- res, err := jiraClient.Authentication.AcquireSessionCookie("username", "password")
- if err != nil || res == false {
- fmt.Printf("Result: %v\n", res)
- panic(err)
- }
-
- issue, _, err := jiraClient.Issue.Get("SYS-5156")
- if err != nil {
- panic(err)
- }
-
- fmt.Printf("%s: %+v\n", issue.Key, issue.Fields.Summary)
-}
-```
-
-### Call a not implemented API endpoint
-
-Not all API endpoints of the JIRA API are implemented into *go-jira*.
-But you can call them anyway:
-Lets get all public projects of [Atlassian`s JIRA instance](https://jira.atlassian.com/).
-
-```go
-package main
-
-import (
- "fmt"
- "github.com/andygrunwald/go-jira"
-)
-
-func main() {
- jiraClient, _ := jira.NewClient(nil, "https://jira.atlassian.com/")
- req, _ := jiraClient.NewRequest("GET", "/rest/api/2/project", nil)
-
- projects := new([]jira.Project)
- _, err := jiraClient.Do(req, projects)
- if err != nil {
- panic(err)
- }
-
- for _, project := range *projects {
- fmt.Printf("%s: %s\n", project.Key, project.Name)
- }
-
- // ...
- // BAM: Bamboo
- // BAMJ: Bamboo JIRA Plugin
- // CLOV: Clover
- // CONF: Confluence
- // ...
-}
-```
-
-## Implementations
-
-* [andygrunwald/jitic](https://github.com/andygrunwald/jitic) - The JIRA Ticket Checker
-
-## Code structure
-
-The code structure of this package was inspired by [google/go-github](https://github.com/google/go-github).
-
-There is one main part (the client).
-Based on this main client the other endpoints, like Issues or Authentication are extracted in services. E.g. `IssueService` or `AuthenticationService`.
-These services own a responsibility of the single endpoints / usecases of JIRA.
-
-## Contribution
-
-Contribution, in any kind of way, is highly welcome!
-It doesn't matter if you are not able to write code.
-Creating issues or holding talks and help other people to use [go-jira](https://github.com/andygrunwald/go-jira) is contribution, too!
-A few examples:
-
-* Correct typos in the README / documentation
-* Reporting bugs
-* Implement a new feature or endpoint
-* Sharing the love if [go-jira](https://github.com/andygrunwald/go-jira) and help people to get use to it
-
-If you are new to pull requests, checkout [Collaborating on projects using issues and pull requests / Creating a pull request](https://help.github.com/articles/creating-a-pull-request/).
-
-## License
-
-This project is released under the terms of the [MIT license](http://en.wikipedia.org/wiki/MIT_License).
\ No newline at end of file
diff --git a/vendor/src/github.com/andygrunwald/go-jira/authentication.go b/vendor/src/github.com/andygrunwald/go-jira/authentication.go
deleted file mode 100644
index 0b9a6cc..0000000
--- a/vendor/src/github.com/andygrunwald/go-jira/authentication.go
+++ /dev/null
@@ -1,81 +0,0 @@
-package jira
-
-import (
- "fmt"
- "net/http"
-)
-
-// AuthenticationService handles authentication for the JIRA instance / API.
-//
-// JIRA API docs: https://docs.atlassian.com/jira/REST/latest/#authentication
-type AuthenticationService struct {
- client *Client
-}
-
-// Session represents a Session JSON response by the JIRA API.
-type Session struct {
- Self string `json:"self,omitempty"`
- Name string `json:"name,omitempty"`
- Session struct {
- Name string `json:"name"`
- Value string `json:"value"`
- } `json:"session,omitempty"`
- LoginInfo struct {
- FailedLoginCount int `json:"failedLoginCount"`
- LoginCount int `json:"loginCount"`
- LastFailedLoginTime string `json:"lastFailedLoginTime"`
- PreviousLoginTime string `json:"previousLoginTime"`
- } `json:"loginInfo"`
- Cookies []*http.Cookie
-}
-
-// AcquireSessionCookie creates a new session for a user in JIRA.
-// Once a session has been successfully created it can be used to access any of JIRA's remote APIs and also the web UI by passing the appropriate HTTP Cookie header.
-// The header will by automatically applied to every API request.
-// Note that it is generally preferrable to use HTTP BASIC authentication with the REST API.
-// However, this resource may be used to mimic the behaviour of JIRA's log-in page (e.g. to display log-in errors to a user).
-//
-// JIRA API docs: https://docs.atlassian.com/jira/REST/latest/#auth/1/session
-func (s *AuthenticationService) AcquireSessionCookie(username, password string) (bool, error) {
- apiEndpoint := "rest/auth/1/session"
- body := struct {
- Username string `json:"username"`
- Password string `json:"password"`
- }{
- username,
- password,
- }
-
- req, err := s.client.NewRequest("POST", apiEndpoint, body)
- if err != nil {
- return false, err
- }
-
- session := new(Session)
- resp, err := s.client.Do(req, session)
- session.Cookies = resp.Cookies()
-
- if err != nil {
- return false, fmt.Errorf("Auth at JIRA instance failed (HTTP(S) request). %s", err)
- }
- if resp != nil && resp.StatusCode != 200 {
- return false, fmt.Errorf("Auth at JIRA instance failed (HTTP(S) request). Status code: %d", resp.StatusCode)
- }
-
- s.client.session = session
-
- return true, nil
-}
-
-// Authenticated reports if the current Client has an authenticated session with JIRA
-func (s *AuthenticationService) Authenticated() bool {
- if s != nil {
- return s.client.session != nil
- }
- return false
-}
-
-// TODO Missing API Call GET (Returns information about the currently authenticated user's session)
-// See https://docs.atlassian.com/jira/REST/latest/#auth/1/session
-// TODO Missing API Call DELETE (Logs the current user out of JIRA, destroying the existing session, if any.)
-// See https://docs.atlassian.com/jira/REST/latest/#auth/1/session
diff --git a/vendor/src/github.com/andygrunwald/go-jira/authentication_test.go b/vendor/src/github.com/andygrunwald/go-jira/authentication_test.go
deleted file mode 100644
index d6741a3..0000000
--- a/vendor/src/github.com/andygrunwald/go-jira/authentication_test.go
+++ /dev/null
@@ -1,86 +0,0 @@
-package jira
-
-import (
- "bytes"
- "fmt"
- "io/ioutil"
- "net/http"
- "testing"
-)
-
-func TestAuthenticationService_AcquireSessionCookie_Failure(t *testing.T) {
- setup()
- defer teardown()
- testMux.HandleFunc("/rest/auth/1/session", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "POST")
- testRequestURL(t, r, "/rest/auth/1/session")
- b, err := ioutil.ReadAll(r.Body)
- if err != nil {
- t.Errorf("Error in read body: %s", err)
- }
- if bytes.Index(b, []byte(`"username":"foo"`)) < 0 {
- t.Error("No username found")
- }
- if bytes.Index(b, []byte(`"password":"bar"`)) < 0 {
- t.Error("No password found")
- }
-
- // Emulate error
- w.WriteHeader(http.StatusInternalServerError)
- })
-
- res, err := testClient.Authentication.AcquireSessionCookie("foo", "bar")
- if err == nil {
- t.Errorf("Expected error, but no error given")
- }
- if res == true {
- t.Error("Expected error, but result was true")
- }
-
- if testClient.Authentication.Authenticated() != false {
- t.Error("Expected false, but result was true")
- }
-}
-
-func TestAuthenticationService_AcquireSessionCookie_Success(t *testing.T) {
- setup()
- defer teardown()
- testMux.HandleFunc("/rest/auth/1/session", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "POST")
- testRequestURL(t, r, "/rest/auth/1/session")
- b, err := ioutil.ReadAll(r.Body)
- if err != nil {
- t.Errorf("Error in read body: %s", err)
- }
- if bytes.Index(b, []byte(`"username":"foo"`)) < 0 {
- t.Error("No username found")
- }
- if bytes.Index(b, []byte(`"password":"bar"`)) < 0 {
- t.Error("No password found")
- }
-
- fmt.Fprint(w, `{"session":{"name":"JSESSIONID","value":"12345678901234567890"},"loginInfo":{"failedLoginCount":10,"loginCount":127,"lastFailedLoginTime":"2016-03-16T04:22:35.386+0000","previousLoginTime":"2016-03-16T04:22:35.386+0000"}}`)
- })
-
- res, err := testClient.Authentication.AcquireSessionCookie("foo", "bar")
- if err != nil {
- t.Errorf("No error expected. Got %s", err)
- }
- if res == false {
- t.Error("Expected result was true. Got false")
- }
-
- if testClient.Authentication.Authenticated() != true {
- t.Error("Expected true, but result was false")
- }
-}
-
-func TestAuthenticationService_Authenticated(t *testing.T) {
- // Skip setup() because we don't want a fully setup client
- testClient = new(Client)
-
- // Test before we've attempted to authenticate
- if testClient.Authentication.Authenticated() != false {
- t.Error("Expected false, but result was true")
- }
-}
diff --git a/vendor/src/github.com/andygrunwald/go-jira/board.go b/vendor/src/github.com/andygrunwald/go-jira/board.go
deleted file mode 100644
index 3072edf..0000000
--- a/vendor/src/github.com/andygrunwald/go-jira/board.go
+++ /dev/null
@@ -1,155 +0,0 @@
-package jira
-
-import (
- "fmt"
- "time"
-)
-
-// BoardService handles Agile Boards for the JIRA instance / API.
-//
-// JIRA API docs: https://docs.atlassian.com/jira-software/REST/server/
-type BoardService struct {
- client *Client
-}
-
-// BoardsList reflects a list of agile boards
-type BoardsList struct {
- MaxResults int `json:"maxResults"`
- StartAt int `json:"startAt"`
- Total int `json:"total"`
- IsLast bool `json:"isLast"`
- Values []Board `json:"values"`
-}
-
-// Board represents a JIRA agile board
-type Board struct {
- ID int `json:"id,omitempty"`
- Self string `json:"self,omitempty"`
- Name string `json:"name,omitempty"`
- Type string `json:"type,omitempty"`
- FilterID int `json:"filterId,omitempty"`
-}
-
-// BoardListOptions specifies the optional parameters to the BoardService.GetList
-type BoardListOptions struct {
- // BoardType filters results to boards of the specified type.
- // Valid values: scrum, kanban.
- BoardType string `url:"boardType,omitempty"`
- // Name filters results to boards that match or partially match the specified name.
- Name string `url:"name,omitempty"`
- // ProjectKeyOrID filters results to boards that are relevant to a project.
- // Relevance meaning that the JQL filter defined in board contains a reference to a project.
- ProjectKeyOrID string `url:"projectKeyOrId,omitempty"`
-
- SearchOptions
-}
-
-// Wrapper struct for search result
-type sprintsResult struct {
- Sprints []Sprint `json:"values"`
-}
-
-// Sprint represents a sprint on JIRA agile board
-type Sprint struct {
- ID int `json:"id"`
- Name string `json:"name"`
- CompleteDate *time.Time `json:"completeDate"`
- EndDate *time.Time `json:"endDate"`
- StartDate *time.Time `json:"startDate"`
- OriginBoardID int `json:"originBoardId"`
- Self string `json:"self"`
- State string `json:"state"`
-}
-
-// GetAllBoards will returns all boards. This only includes boards that the user has permission to view.
-//
-// JIRA API docs: https://docs.atlassian.com/jira-software/REST/cloud/#agile/1.0/board-getAllBoards
-func (s *BoardService) GetAllBoards(opt *BoardListOptions) (*BoardsList, *Response, error) {
- apiEndpoint := "rest/agile/1.0/board"
- url, err := addOptions(apiEndpoint, opt)
- req, err := s.client.NewRequest("GET", url, nil)
- if err != nil {
- return nil, nil, err
- }
-
- boards := new(BoardsList)
- resp, err := s.client.Do(req, boards)
- if err != nil {
- return nil, resp, err
- }
-
- return boards, resp, err
-}
-
-// GetBoard will returns the board for the given boardID.
-// This board will only be returned if the user has permission to view it.
-//
-// JIRA API docs: https://docs.atlassian.com/jira-software/REST/cloud/#agile/1.0/board-getBoard
-func (s *BoardService) GetBoard(boardID int) (*Board, *Response, error) {
- apiEndpoint := fmt.Sprintf("rest/agile/1.0/board/%v", boardID)
- req, err := s.client.NewRequest("GET", apiEndpoint, nil)
- if err != nil {
- return nil, nil, err
- }
-
- board := new(Board)
- resp, err := s.client.Do(req, board)
- if err != nil {
- return nil, resp, err
- }
- return board, resp, nil
-}
-
-// CreateBoard creates a new board. Board name, type and filter Id is required.
-// name - Must be less than 255 characters.
-// type - Valid values: scrum, kanban
-// filterId - Id of a filter that the user has permissions to view.
-// Note, if the user does not have the 'Create shared objects' permission and tries to create a shared board, a private
-// board will be created instead (remember that board sharing depends on the filter sharing).
-//
-// JIRA API docs: https://docs.atlassian.com/jira-software/REST/cloud/#agile/1.0/board-createBoard
-func (s *BoardService) CreateBoard(board *Board) (*Board, *Response, error) {
- apiEndpoint := "rest/agile/1.0/board"
- req, err := s.client.NewRequest("POST", apiEndpoint, board)
- if err != nil {
- return nil, nil, err
- }
-
- responseBoard := new(Board)
- resp, err := s.client.Do(req, responseBoard)
- if err != nil {
- return nil, resp, err
- }
-
- return responseBoard, resp, nil
-}
-
-// DeleteBoard will delete an agile board.
-//
-// JIRA API docs: https://docs.atlassian.com/jira-software/REST/cloud/#agile/1.0/board-deleteBoard
-func (s *BoardService) DeleteBoard(boardID int) (*Board, *Response, error) {
- apiEndpoint := fmt.Sprintf("rest/agile/1.0/board/%v", boardID)
- req, err := s.client.NewRequest("DELETE", apiEndpoint, nil)
- if err != nil {
- return nil, nil, err
- }
-
- resp, err := s.client.Do(req, nil)
- return nil, resp, err
-}
-
-// GetAllSprints will returns all sprints from a board, for a given board Id.
-// This only includes sprints that the user has permission to view.
-//
-// JIRA API docs: https://docs.atlassian.com/jira-software/REST/cloud/#agile/1.0/board/{boardId}/sprint
-func (s *BoardService) GetAllSprints(boardID string) ([]Sprint, *Response, error) {
- apiEndpoint := fmt.Sprintf("rest/agile/1.0/board/%s/sprint", boardID)
- req, err := s.client.NewRequest("GET", apiEndpoint, nil)
- if err != nil {
- return nil, nil, err
- }
-
- result := new(sprintsResult)
- resp, err := s.client.Do(req, result)
- return result.Sprints, resp, err
-}
diff --git a/vendor/src/github.com/andygrunwald/go-jira/board_test.go b/vendor/src/github.com/andygrunwald/go-jira/board_test.go
deleted file mode 100644
index 454fc44..0000000
--- a/vendor/src/github.com/andygrunwald/go-jira/board_test.go
+++ /dev/null
@@ -1,186 +0,0 @@
-package jira
-
-import (
- "fmt"
- "io/ioutil"
- "net/http"
- "testing"
-)
-
-func TestBoardService_GetAllBoards(t *testing.T) {
- setup()
- defer teardown()
- testAPIEdpoint := "/rest/agile/1.0/board"
-
- raw, err := ioutil.ReadFile("./mocks/all_boards.json")
- if err != nil {
- t.Error(err.Error())
- }
- testMux.HandleFunc(testAPIEdpoint, func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testRequestURL(t, r, testAPIEdpoint)
- fmt.Fprint(w, string(raw))
- })
-
- projects, _, err := testClient.Board.GetAllBoards(nil)
- if projects == nil {
- t.Error("Expected boards list. Boards list is nil")
- }
- if err != nil {
- t.Errorf("Error given: %s", err)
- }
-}
-
-// Test with params
-func TestBoardService_GetAllBoards_WithFilter(t *testing.T) {
- setup()
- defer teardown()
- testAPIEdpoint := "/rest/agile/1.0/board"
-
- raw, err := ioutil.ReadFile("./mocks/all_boards_filtered.json")
- if err != nil {
- t.Error(err.Error())
- }
- testMux.HandleFunc(testAPIEdpoint, func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testRequestURL(t, r, testAPIEdpoint)
- fmt.Fprint(w, string(raw))
- })
-
- boardsListOptions := &BoardListOptions{
- BoardType: "scrum",
- Name: "Test",
- ProjectKeyOrID: "TE",
- }
- boardsListOptions.StartAt = 1
- boardsListOptions.MaxResults = 10
-
- projects, _, err := testClient.Board.GetAllBoards(boardsListOptions)
- if projects == nil {
- t.Error("Expected boards list. Boards list is nil")
- }
- if err != nil {
- t.Errorf("Error given: %s", err)
- }
-}
-
-func TestBoardService_GetBoard(t *testing.T) {
- setup()
- defer teardown()
- testAPIEdpoint := "/rest/agile/1.0/board/1"
-
- testMux.HandleFunc(testAPIEdpoint, func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testRequestURL(t, r, testAPIEdpoint)
- fmt.Fprint(w, `{"id":4,"self":"https://test.jira.org/rest/agile/1.0/board/1","name":"Test Weekly","type":"scrum"}`)
- })
-
- board, _, err := testClient.Board.GetBoard(1)
- if board == nil {
- t.Error("Expected board list. Board list is nil")
- }
- if err != nil {
- t.Errorf("Error given: %s", err)
- }
-}
-
-func TestBoardService_GetBoard_WrongID(t *testing.T) {
- setup()
- defer teardown()
- testAPIEndpoint := "/rest/api/2/board/99999999"
-
- testMux.HandleFunc(testAPIEndpoint, func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testRequestURL(t, r, testAPIEndpoint)
- fmt.Fprint(w, nil)
- })
-
- board, resp, err := testClient.Board.GetBoard(99999999)
- if board != nil {
- t.Errorf("Expected nil. Got %s", err)
- }
-
- if resp.Status == "404" {
- t.Errorf("Expected status 404. Got %s", resp.Status)
- }
- if err == nil {
- t.Errorf("Error given: %s", err)
- }
-}
-
-func TestBoardService_CreateBoard(t *testing.T) {
- setup()
- defer teardown()
- testMux.HandleFunc("/rest/agile/1.0/board", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "POST")
- testRequestURL(t, r, "/rest/agile/1.0/board")
-
- w.WriteHeader(http.StatusCreated)
- fmt.Fprint(w, `{"id":17,"self":"https://test.jira.org/rest/agile/1.0/board/17","name":"Test","type":"kanban"}`)
- })
-
- b := &Board{
- Name: "Test",
- Type: "kanban",
- FilterID: 17,
- }
- issue, _, err := testClient.Board.CreateBoard(b)
- if issue == nil {
- t.Error("Expected board. Board is nil")
- }
- if err != nil {
- t.Errorf("Error given: %s", err)
- }
-}
-
-func TestBoardService_DeleteBoard(t *testing.T) {
- setup()
- defer teardown()
- testMux.HandleFunc("/rest/agile/1.0/board/1", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "DELETE")
- testRequestURL(t, r, "/rest/agile/1.0/board/1")
-
- w.WriteHeader(http.StatusNoContent)
- fmt.Fprint(w, `{}`)
- })
-
- _, resp, err := testClient.Board.DeleteBoard(1)
- if resp.StatusCode != 204 {
- t.Error("Expected board not deleted.")
- }
- if err != nil {
- t.Errorf("Error given: %s", err)
- }
-}
-
-func TestBoardService_GetAllSprints(t *testing.T) {
- setup()
- defer teardown()
-
- testAPIEndpoint := "/rest/agile/1.0/board/123/sprint"
-
- raw, err := ioutil.ReadFile("./mocks/sprints.json")
- if err != nil {
- t.Error(err.Error())
- }
-
- testMux.HandleFunc(testAPIEndpoint, func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testRequestURL(t, r, testAPIEndpoint)
- fmt.Fprint(w, string(raw))
- })
-
- sprints, _, err := testClient.Board.GetAllSprints("123")
-
- if err != nil {
- t.Errorf("Got error: %v", err)
- }
-
- if sprints == nil {
- t.Error("Expected sprint list. Got nil.")
- }
-
- if len(sprints) != 4 {
- t.Errorf("Expected 4 transitions. Got %d", len(sprints))
- }
-}
diff --git a/vendor/src/github.com/andygrunwald/go-jira/img/go-jira-compressed.png b/vendor/src/github.com/andygrunwald/go-jira/img/go-jira-compressed.png
deleted file mode 100644
index 8fc584a..0000000
Binary files a/vendor/src/github.com/andygrunwald/go-jira/img/go-jira-compressed.png and /dev/null differ
diff --git a/vendor/src/github.com/andygrunwald/go-jira/issue.go b/vendor/src/github.com/andygrunwald/go-jira/issue.go
deleted file mode 100644
index b9c9610..0000000
--- a/vendor/src/github.com/andygrunwald/go-jira/issue.go
+++ /dev/null
@@ -1,574 +0,0 @@
-package jira
-
-import (
- "bytes"
- "fmt"
- "io"
- "mime/multipart"
- "net/url"
- "strings"
- "time"
-)
-
-const (
- // AssigneeAutomatic represents the value of the "Assignee: Automatic" of JIRA
- AssigneeAutomatic = "-1"
-)
-
-// IssueService handles Issues for the JIRA instance / API.
-//
-// JIRA API docs: https://docs.atlassian.com/jira/REST/latest/#api/2/issue
-type IssueService struct {
- client *Client
-}
-
-// Issue represents a JIRA issue.
-type Issue struct {
- Expand string `json:"expand,omitempty"`
- ID string `json:"id,omitempty"`
- Self string `json:"self,omitempty"`
- Key string `json:"key,omitempty"`
- Fields *IssueFields `json:"fields,omitempty"`
-}
-
-// Attachment represents a JIRA attachment
-type Attachment struct {
- Self string `json:"self,omitempty"`
- ID string `json:"id,omitempty"`
- Filename string `json:"filename,omitempty"`
- Author *User `json:"author,omitempty"`
- Created string `json:"created,omitempty"`
- Size int `json:"size,omitempty"`
- MimeType string `json:"mimeType,omitempty"`
- Content string `json:"content,omitempty"`
- Thumbnail string `json:"thumbnail,omitempty"`
-}
-
-// Epic represents the epic to which an issue is associated
-// Not that this struct does not process the returned "color" value
-type Epic struct {
- ID int `json:"id"`
- Key string `json:"key"`
- Self string `json:"self"`
- Name string `json:"name"`
- Summary string `json:"summary"`
- Done bool `json:"done"`
-}
-
-// IssueFields represents single fields of a JIRA issue.
-// Every JIRA issue has several fields attached.
-type IssueFields struct {
- // TODO Missing fields
- // * "timespent": null,
- // * "aggregatetimespent": null,
- // * "workratio": -1,
- // * "lastViewed": null,
- // * "timeestimate": null,
- // * "aggregatetimeoriginalestimate": null,
- // * "timeoriginalestimate": null,
- // * "timetracking": {},
- // * "aggregatetimeestimate": null,
- // * "environment": null,
- // * "duedate": null,
- Type IssueType `json:"issuetype"`
- Project Project `json:"project,omitempty"`
- Resolution *Resolution `json:"resolution,omitempty"`
- Priority *Priority `json:"priority,omitempty"`
- Resolutiondate string `json:"resolutiondate,omitempty"`
- Created string `json:"created,omitempty"`
- Watches *Watches `json:"watches,omitempty"`
- Assignee *User `json:"assignee,omitempty"`
- Updated string `json:"updated,omitempty"`
- Description string `json:"description,omitempty"`
- Summary string `json:"summary"`
- Creator *User `json:"Creator,omitempty"`
- Reporter *User `json:"reporter,omitempty"`
- Components []*Component `json:"components,omitempty"`
- Status *Status `json:"status,omitempty"`
- Progress *Progress `json:"progress,omitempty"`
- AggregateProgress *Progress `json:"aggregateprogress,omitempty"`
- Worklog *Worklog `json:"worklog,omitempty"`
- IssueLinks []*IssueLink `json:"issuelinks,omitempty"`
- Comments *Comments `json:"comment,omitempty"`
- FixVersions []*FixVersion `json:"fixVersions,omitempty"`
- Labels []string `json:"labels,omitempty"`
- Subtasks []*Subtasks `json:"subtasks,omitempty"`
- Attachments []*Attachment `json:"attachment,omitempty"`
- Epic *Epic `json:"epic,omitempty"`
-}
-
-// IssueType represents a type of a JIRA issue.
-// Typical types are "Request", "Bug", "Story", ...
-type IssueType struct {
- Self string `json:"self,omitempty"`
- ID string `json:"id,omitempty"`
- Description string `json:"description,omitempty"`
- IconURL string `json:"iconUrl,omitempty"`
- Name string `json:"name,omitempty"`
- Subtask bool `json:"subtask,omitempty"`
- AvatarID int `json:"avatarId,omitempty"`
-}
-
-// Resolution represents a resolution of a JIRA issue.
-// Typical types are "Fixed", "Suspended", "Won't Fix", ...
-type Resolution struct {
- Self string `json:"self"`
- ID string `json:"id"`
- Description string `json:"description"`
- Name string `json:"name"`
-}
-
-// Priority represents a priority of a JIRA issue.
-// Typical types are "Normal", "Moderate", "Urgent", ...
-type Priority struct {
- Self string `json:"self,omitempty"`
- IconURL string `json:"iconUrl,omitempty"`
- Name string `json:"name,omitempty"`
- ID string `json:"id,omitempty"`
-}
-
-// Watches represents a type of how many user are "observing" a JIRA issue to track the status / updates.
-type Watches struct {
- Self string `json:"self,omitempty"`
- WatchCount int `json:"watchCount,omitempty"`
- IsWatching bool `json:"isWatching,omitempty"`
-}
-
-// User represents a user who is this JIRA issue assigned to.
-type User struct {
- Self string `json:"self,omitempty"`
- Name string `json:"name,omitempty"`
- Key string `json:"key,omitempty"`
- EmailAddress string `json:"emailAddress,omitempty"`
- AvatarUrls AvatarUrls `json:"avatarUrls,omitempty"`
- DisplayName string `json:"displayName,omitempty"`
- Active bool `json:"active,omitempty"`
- TimeZone string `json:"timeZone,omitempty"`
-}
-
-// AvatarUrls represents different dimensions of avatars / images
-type AvatarUrls struct {
- Four8X48 string `json:"48x48,omitempty"`
- Two4X24 string `json:"24x24,omitempty"`
- One6X16 string `json:"16x16,omitempty"`
- Three2X32 string `json:"32x32,omitempty"`
-}
-
-// Component represents a "component" of a JIRA issue.
-// Components can be user defined in every JIRA instance.
-type Component struct {
- Self string `json:"self,omitempty"`
- ID string `json:"id,omitempty"`
- Name string `json:"name,omitempty"`
-}
-
-// Status represents the current status of a JIRA issue.
-// Typical status are "Open", "In Progress", "Closed", ...
-// Status can be user defined in every JIRA instance.
-type Status struct {
- Self string `json:"self"`
- Description string `json:"description"`
- IconURL string `json:"iconUrl"`
- Name string `json:"name"`
- ID string `json:"id"`
- StatusCategory StatusCategory `json:"statusCategory"`
-}
-
-// StatusCategory represents the category a status belongs to.
-// Those categories can be user defined in every JIRA instance.
-type StatusCategory struct {
- Self string `json:"self"`
- ID int `json:"id"`
- Name string `json:"name"`
- Key string `json:"key"`
- ColorName string `json:"colorName"`
-}
-
-// Progress represents the progress of a JIRA issue.
-type Progress struct {
- Progress int `json:"progress"`
- Total int `json:"total"`
-}
-
-// Time represents the Time definition of JIRA as a time.Time of go
-type Time time.Time
-
-// Wrapper struct for search result
-type transitionResult struct {
- Transitions []Transition `json:"transitions"`
-}
-
-// Transition represents an issue transition in JIRA
-type Transition struct {
- ID string `json:"id"`
- Name string `json:"name"`
- Fields map[string]TransitionField `json:"fields"`
-}
-
-// TransitionField represents the value of one Transistion
-type TransitionField struct {
- Required bool `json:"required"`
-}
-
-// CreateTransitionPayload is used for creating new issue transitions
-type CreateTransitionPayload struct {
- Transition TransitionPayload `json:"transition"`
-}
-
-// TransitionPayload represents the request payload of Transistion calls like DoTransition
-type TransitionPayload struct {
- ID string `json:"id"`
-}
-
-// UnmarshalJSON will transform the JIRA time into a time.Time
-// during the transformation of the JIRA JSON response
-func (t *Time) UnmarshalJSON(b []byte) error {
- ti, err := time.Parse("\"2006-01-02T15:04:05.999-0700\"", string(b))
- if err != nil {
- return err
- }
- *t = Time(ti)
- return nil
-}
-
-// Worklog represents the work log of a JIRA issue.
-// One Worklog contains zero or n WorklogRecords
-// JIRA Wiki: https://confluence.atlassian.com/jira/logging-work-on-an-issue-185729605.html
-type Worklog struct {
- StartAt int `json:"startAt"`
- MaxResults int `json:"maxResults"`
- Total int `json:"total"`
- Worklogs []WorklogRecord `json:"worklogs"`
-}
-
-// WorklogRecord represents one entry of a Worklog
-type WorklogRecord struct {
- Self string `json:"self"`
- Author User `json:"author"`
- UpdateAuthor User `json:"updateAuthor"`
- Comment string `json:"comment"`
- Created Time `json:"created"`
- Updated Time `json:"updated"`
- Started Time `json:"started"`
- TimeSpent string `json:"timeSpent"`
- TimeSpentSeconds int `json:"timeSpentSeconds"`
- ID string `json:"id"`
- IssueID string `json:"issueId"`
-}
-
-// Subtasks represents all issues of a parent issue.
-type Subtasks struct {
- ID string `json:"id"`
- Key string `json:"key"`
- Self string `json:"self"`
- Fields IssueFields `json:"fields"`
-}
-
-// IssueLink represents a link between two issues in JIRA.
-type IssueLink struct {
- ID string `json:"id,omitempty"`
- Self string `json:"self,omitempty"`
- Type IssueLinkType `json:"type"`
- OutwardIssue *Issue `json:"outwardIssue"`
- InwardIssue *Issue `json:"inwardIssue"`
- Comment *Comment `json:"comment,omitempty"`
-}
-
-// IssueLinkType represents a type of a link between to issues in JIRA.
-// Typical issue link types are "Related to", "Duplicate", "Is blocked by", etc.
-type IssueLinkType struct {
- ID string `json:"id,omitempty"`
- Self string `json:"self,omitempty"`
- Name string `json:"name"`
- Inward string `json:"inward"`
- Outward string `json:"outward"`
-}
-
-// Comments represents a list of Comment.
-type Comments struct {
- Comments []*Comment `json:"comments,omitempty"`
-}
-
-// Comment represents a comment by a person to an issue in JIRA.
-type Comment struct {
- ID string `json:"id,omitempty"`
- Self string `json:"self,omitempty"`
- Name string `json:"name,omitempty"`
- Author User `json:"author,omitempty"`
- Body string `json:"body,omitempty"`
- UpdateAuthor User `json:"updateAuthor,omitempty"`
- Updated string `json:"updated,omitempty"`
- Created string `json:"created,omitempty"`
- Visibility CommentVisibility `json:"visibility,omitempty"`
-}
-
-// FixVersion represents a software release in which an issue is fixed.
-type FixVersion struct {
- Archived *bool `json:"archived,omitempty"`
- ID string `json:"id,omitempty"`
- Name string `json:"name,omitempty"`
- ProjectID int `json:"projectId,omitempty"`
- ReleaseDate string `json:"releaseDate,omitempty"`
- Released *bool `json:"released,omitempty"`
- Self string `json:"self,omitempty"`
- UserReleaseDate string `json:"userReleaseDate,omitempty"`
-}
-
-// CommentVisibility represents he visibility of a comment.
-// E.g. Type could be "role" and Value "Administrators"
-type CommentVisibility struct {
- Type string `json:"type,omitempty"`
- Value string `json:"value,omitempty"`
-}
-
-// SearchOptions specifies the optional parameters to various List methods that
-// support pagination.
-// Pagination is used for the JIRA REST APIs to conserve server resources and limit
-// response size for resources that return potentially large collection of items.
-// A request to a pages API will result in a values array wrapped in a JSON object with some paging metadata
-// Default Pagination options
-type SearchOptions struct {
- // StartAt: The starting index of the returned projects. Base index: 0.
- StartAt int `url:"startAt,omitempty"`
- // MaxResults: The maximum number of projects to return per page. Default: 50.
- MaxResults int `url:"maxResults,omitempty"`
-}
-
-// searchResult is only a small wrapper arround the Search (with JQL) method
-// to be able to parse the results
-type searchResult struct {
- Issues []Issue `json:"issues"`
- StartAt int `json:"startAt"`
- MaxResults int `json:"maxResults"`
- Total int `json:"total"`
-}
-
-// CustomFields represents custom fields of JIRA
-// This can heavily differ between JIRA instances
-type CustomFields map[string]string
-
-// Get returns a full representation of the issue for the given issue key.
-// JIRA will attempt to identify the issue by the issueIdOrKey path parameter.
-// This can be an issue id, or an issue key.
-// If the issue cannot be found via an exact match, JIRA will also look for the issue in a case-insensitive way, or by looking to see if the issue was moved.
-//
-// JIRA API docs: https://docs.atlassian.com/jira/REST/latest/#api/2/issue-getIssue
-func (s *IssueService) Get(issueID string) (*Issue, *Response, error) {
- apiEndpoint := fmt.Sprintf("rest/api/2/issue/%s", issueID)
- req, err := s.client.NewRequest("GET", apiEndpoint, nil)
- if err != nil {
- return nil, nil, err
- }
-
- issue := new(Issue)
- resp, err := s.client.Do(req, issue)
- if err != nil {
- return nil, resp, err
- }
-
- return issue, resp, nil
-}
-
-// DownloadAttachment returns a Response of an attachment for a given attachmentID.
-// The attachment is in the Response.Body of the response.
-// This is an io.ReadCloser.
-// The caller should close the resp.Body.
-func (s *IssueService) DownloadAttachment(attachmentID string) (*Response, error) {
- apiEndpoint := fmt.Sprintf("secure/attachment/%s/", attachmentID)
- req, err := s.client.NewRequest("GET", apiEndpoint, nil)
- if err != nil {
- return nil, err
- }
-
- resp, err := s.client.Do(req, nil)
- if err != nil {
- return resp, err
- }
-
- return resp, nil
-}
-
-// PostAttachment uploads r (io.Reader) as an attachment to a given attachmentID
-func (s *IssueService) PostAttachment(attachmentID string, r io.Reader, attachmentName string) (*[]Attachment, *Response, error) {
- apiEndpoint := fmt.Sprintf("rest/api/2/issue/%s/attachments", attachmentID)
-
- b := new(bytes.Buffer)
- writer := multipart.NewWriter(b)
-
- fw, err := writer.CreateFormFile("file", attachmentName)
- if err != nil {
- return nil, nil, err
- }
-
- if r != nil {
- // Copy the file
- if _, err = io.Copy(fw, r); err != nil {
- return nil, nil, err
- }
- }
- writer.Close()
-
- req, err := s.client.NewMultiPartRequest("POST", apiEndpoint, b)
- if err != nil {
- return nil, nil, err
- }
-
- req.Header.Set("Content-Type", writer.FormDataContentType())
-
- // PostAttachment response returns a JSON array (as multiple attachments can be posted)
- attachment := new([]Attachment)
- resp, err := s.client.Do(req, attachment)
- if err != nil {
- return nil, resp, err
- }
-
- return attachment, resp, nil
-}
-
-// Create creates an issue or a sub-task from a JSON representation.
-// Creating a sub-task is similar to creating a regular issue, with two important differences:
-// The issueType field must correspond to a sub-task issue type and you must provide a parent field in the issue create request containing the id or key of the parent issue.
-//
-// JIRA API docs: https://docs.atlassian.com/jira/REST/latest/#api/2/issue-createIssues
-func (s *IssueService) Create(issue *Issue) (*Issue, *Response, error) {
- apiEndpoint := "rest/api/2/issue/"
- req, err := s.client.NewRequest("POST", apiEndpoint, issue)
- if err != nil {
- return nil, nil, err
- }
-
- responseIssue := new(Issue)
- resp, err := s.client.Do(req, responseIssue)
- if err != nil {
- return nil, resp, err
- }
-
- return responseIssue, resp, nil
-}
-
-// AddComment adds a new comment to issueID.
-//
-// JIRA API docs: https://docs.atlassian.com/jira/REST/latest/#api/2/issue-addComment
-func (s *IssueService) AddComment(issueID string, comment *Comment) (*Comment, *Response, error) {
- apiEndpoint := fmt.Sprintf("rest/api/2/issue/%s/comment", issueID)
- req, err := s.client.NewRequest("POST", apiEndpoint, comment)
- if err != nil {
- return nil, nil, err
- }
-
- responseComment := new(Comment)
- resp, err := s.client.Do(req, responseComment)
- if err != nil {
- return nil, resp, err
- }
-
- return responseComment, resp, nil
-}
-
-// AddLink adds a link between two issues.
-//
-// JIRA API docs: https://docs.atlassian.com/jira/REST/latest/#api/2/issueLink
-func (s *IssueService) AddLink(issueLink *IssueLink) (*Response, error) {
- apiEndpoint := fmt.Sprintf("rest/api/2/issueLink")
- req, err := s.client.NewRequest("POST", apiEndpoint, issueLink)
- if err != nil {
- return nil, err
- }
-
- resp, err := s.client.Do(req, nil)
- return resp, err
-}
-
-// Search will search for tickets according to the jql
-//
-// JIRA API docs: https://developer.atlassian.com/jiradev/jira-apis/jira-rest-apis/jira-rest-api-tutorials/jira-rest-api-example-query-issues
-func (s *IssueService) Search(jql string, options *SearchOptions) ([]Issue, *Response, error) {
- var u string
- if options == nil {
- u = fmt.Sprintf("rest/api/2/search?jql=%s", url.QueryEscape(jql))
- } else {
- u = fmt.Sprintf("rest/api/2/search?jql=%s&startAt=%d&maxResults=%d", url.QueryEscape(jql),
- options.StartAt, options.MaxResults)
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return []Issue{}, nil, err
- }
-
- v := new(searchResult)
- resp, err := s.client.Do(req, v)
- return v.Issues, resp, err
-}
-
-// GetCustomFields returns a map of customfield_* keys with string values
-func (s *IssueService) GetCustomFields(issueID string) (CustomFields, *Response, error) {
- apiEndpoint := fmt.Sprintf("rest/api/2/issue/%s", issueID)
- req, err := s.client.NewRequest("GET", apiEndpoint, nil)
- if err != nil {
- return nil, nil, err
- }
-
- issue := new(map[string]interface{})
- resp, err := s.client.Do(req, issue)
- if err != nil {
- return nil, resp, err
- }
-
- m := *issue
- f := m["fields"]
- cf := make(CustomFields)
- if f == nil {
- return cf, resp, nil
- }
-
- if rec, ok := f.(map[string]interface{}); ok {
- for key, val := range rec {
- if strings.Contains(key, "customfield") {
- cf[key] = fmt.Sprint(val)
- }
- }
- }
- return cf, resp, nil
-}
-
-// GetTransitions gets a list of the transitions possible for this issue by the current user,
-// along with fields that are required and their types.
-//
-// JIRA API docs: https://docs.atlassian.com/jira/REST/latest/#api/2/issue-getTransitions
-func (s *IssueService) GetTransitions(id string) ([]Transition, *Response, error) {
- apiEndpoint := fmt.Sprintf("rest/api/2/issue/%s/transitions?expand=transitions.fields", id)
- req, err := s.client.NewRequest("GET", apiEndpoint, nil)
- if err != nil {
- return nil, nil, err
- }
-
- result := new(transitionResult)
- resp, err := s.client.Do(req, result)
- return result.Transitions, resp, err
-}
-
-// DoTransition performs a transition on an issue.
-// When performing the transition you can update or set other issue fields.
-//
-// JIRA API docs: https://docs.atlassian.com/jira/REST/latest/#api/2/issue-doTransition
-func (s *IssueService) DoTransition(ticketID, transitionID string) (*Response, error) {
- apiEndpoint := fmt.Sprintf("rest/api/2/issue/%s/transitions", ticketID)
-
- payload := CreateTransitionPayload{
- Transition: TransitionPayload{
- ID: transitionID,
- },
- }
- req, err := s.client.NewRequest("POST", apiEndpoint, payload)
- if err != nil {
- return nil, err
- }
-
- resp, err := s.client.Do(req, nil)
- if err != nil {
- return nil, err
- }
-
- return resp, nil
-}
diff --git a/vendor/src/github.com/andygrunwald/go-jira/issue_test.go b/vendor/src/github.com/andygrunwald/go-jira/issue_test.go
deleted file mode 100644
index 1cead6f..0000000
--- a/vendor/src/github.com/andygrunwald/go-jira/issue_test.go
+++ /dev/null
@@ -1,467 +0,0 @@
-package jira
-
-import (
- "encoding/json"
- "fmt"
- "io/ioutil"
- "net/http"
- "reflect"
- "strings"
- "testing"
-)
-
-func TestIssueService_Get_Success(t *testing.T) {
- setup()
- defer teardown()
- testMux.HandleFunc("/rest/api/2/issue/10002", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testRequestURL(t, r, "/rest/api/2/issue/10002")
-
- fmt.Fprint(w, `{"expand":"renderedFields,names,schema,transitions,operations,editmeta,changelog,versionedRepresentations","id":"10002","self":"http://www.example.com/jira/rest/api/2/issue/10002","key":"EX-1","fields":{"watcher":{"self":"http://www.example.com/jira/rest/api/2/issue/EX-1/watchers","isWatching":false,"watchCount":1,"watchers":[{"self":"http://www.example.com/jira/rest/api/2/user?username=fred","name":"fred","displayName":"Fred F. User","active":false}]},"attachment":[{"self":"http://www.example.com/jira/rest/api/2.0/attachments/10000","filename":"picture.jpg","author":{"self":"http://www.example.com/jira/rest/api/2/user?username=fred","name":"fred","avatarUrls":{"48x48":"http://www.example.com/jira/secure/useravatar?size=large&ownerId=fred","24x24":"http://www.example.com/jira/secure/useravatar?size=small&ownerId=fred","16x16":"http://www.example.com/jira/secure/useravatar?size=xsmall&ownerId=fred","32x32":"http://www.example.com/jira/secure/useravatar?size=medium&ownerId=fred"},"displayName":"Fred F. 
User","active":false},"created":"2016-03-16T04:22:37.461+0000","size":23123,"mimeType":"image/jpeg","content":"http://www.example.com/jira/attachments/10000","thumbnail":"http://www.example.com/jira/secure/thumbnail/10000"}],"sub-tasks":[{"id":"10000","type":{"id":"10000","name":"","inward":"Parent","outward":"Sub-task"},"outwardIssue":{"id":"10003","key":"EX-2","self":"http://www.example.com/jira/rest/api/2/issue/EX-2","fields":{"status":{"iconUrl":"http://www.example.com/jira//images/icons/statuses/open.png","name":"Open"}}}}],"description":"example bug report","project":{"self":"http://www.example.com/jira/rest/api/2/project/EX","id":"10000","key":"EX","name":"Example","avatarUrls":{"48x48":"http://www.example.com/jira/secure/projectavatar?size=large&pid=10000","24x24":"http://www.example.com/jira/secure/projectavatar?size=small&pid=10000","16x16":"http://www.example.com/jira/secure/projectavatar?size=xsmall&pid=10000","32x32":"http://www.example.com/jira/secure/projectavatar?size=medium&pid=10000"},"projectCategory":{"self":"http://www.example.com/jira/rest/api/2/projectCategory/10000","id":"10000","name":"FIRST","description":"First Project Category"}},"comment":{"comments":[{"self":"http://www.example.com/jira/rest/api/2/issue/10010/comment/10000","id":"10000","author":{"self":"http://www.example.com/jira/rest/api/2/user?username=fred","name":"fred","displayName":"Fred F. User","active":false},"body":"Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque eget venenatis elit. Duis eu justo eget augue iaculis fermentum. Sed semper quam laoreet nisi egestas at posuere augue semper.","updateAuthor":{"self":"http://www.example.com/jira/rest/api/2/user?username=fred","name":"fred","displayName":"Fred F. 
User","active":false},"created":"2016-03-16T04:22:37.356+0000","updated":"2016-03-16T04:22:37.356+0000","visibility":{"type":"role","value":"Administrators"}}]},"issuelinks":[{"id":"10001","type":{"id":"10000","name":"Dependent","inward":"depends on","outward":"is depended by"},"outwardIssue":{"id":"10004L","key":"PRJ-2","self":"http://www.example.com/jira/rest/api/2/issue/PRJ-2","fields":{"status":{"iconUrl":"http://www.example.com/jira//images/icons/statuses/open.png","name":"Open"}}}},{"id":"10002","type":{"id":"10000","name":"Dependent","inward":"depends on","outward":"is depended by"},"inwardIssue":{"id":"10004","key":"PRJ-3","self":"http://www.example.com/jira/rest/api/2/issue/PRJ-3","fields":{"status":{"iconUrl":"http://www.example.com/jira//images/icons/statuses/open.png","name":"Open"}}}}],"worklog":{"worklogs":[{"self":"http://www.example.com/jira/rest/api/2/issue/10010/worklog/10000","author":{"self":"http://www.example.com/jira/rest/api/2/user?username=fred","name":"fred","displayName":"Fred F. User","active":false},"updateAuthor":{"self":"http://www.example.com/jira/rest/api/2/user?username=fred","name":"fred","displayName":"Fred F. User","active":false},"comment":"I did some work here.","updated":"2016-03-16T04:22:37.471+0000","visibility":{"type":"group","value":"jira-developers"},"started":"2016-03-16T04:22:37.471+0000","timeSpent":"3h 20m","timeSpentSeconds":12000,"id":"100028","issueId":"10002"}]},"updated":"2016-04-06T02:36:53.594-0700","timetracking":{"originalEstimate":"10m","remainingEstimate":"3m","timeSpent":"6m","originalEstimateSeconds":600,"remainingEstimateSeconds":200,"timeSpentSeconds":400}},"names":{"watcher":"watcher","attachment":"attachment","sub-tasks":"sub-tasks","description":"description","project":"project","comment":"comment","issuelinks":"issuelinks","worklog":"worklog","updated":"updated","timetracking":"timetracking"},"schema":{}}`)
- })
-
- issue, _, err := testClient.Issue.Get("10002")
- if issue == nil {
- t.Error("Expected issue. Issue is nil")
- }
- if err != nil {
- t.Errorf("Error given: %s", err)
- }
-}
-
-func TestIssueService_Create(t *testing.T) {
- setup()
- defer teardown()
- testMux.HandleFunc("/rest/api/2/issue/", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "POST")
- testRequestURL(t, r, "/rest/api/2/issue/")
-
- w.WriteHeader(http.StatusCreated)
- fmt.Fprint(w, `{"expand":"renderedFields,names,schema,transitions,operations,editmeta,changelog,versionedRepresentations","id":"10002","self":"http://www.example.com/jira/rest/api/2/issue/10002","key":"EX-1","fields":{"watcher":{"self":"http://www.example.com/jira/rest/api/2/issue/EX-1/watchers","isWatching":false,"watchCount":1,"watchers":[{"self":"http://www.example.com/jira/rest/api/2/user?username=fred","name":"fred","displayName":"Fred F. User","active":false}]},"attachment":[{"self":"http://www.example.com/jira/rest/api/2.0/attachments/10000","filename":"picture.jpg","author":{"self":"http://www.example.com/jira/rest/api/2/user?username=fred","name":"fred","avatarUrls":{"48x48":"http://www.example.com/jira/secure/useravatar?size=large&ownerId=fred","24x24":"http://www.example.com/jira/secure/useravatar?size=small&ownerId=fred","16x16":"http://www.example.com/jira/secure/useravatar?size=xsmall&ownerId=fred","32x32":"http://www.example.com/jira/secure/useravatar?size=medium&ownerId=fred"},"displayName":"Fred F. 
User","active":false},"created":"2016-03-16T04:22:37.461+0000","size":23123,"mimeType":"image/jpeg","content":"http://www.example.com/jira/attachments/10000","thumbnail":"http://www.example.com/jira/secure/thumbnail/10000"}],"sub-tasks":[{"id":"10000","type":{"id":"10000","name":"","inward":"Parent","outward":"Sub-task"},"outwardIssue":{"id":"10003","key":"EX-2","self":"http://www.example.com/jira/rest/api/2/issue/EX-2","fields":{"status":{"iconUrl":"http://www.example.com/jira//images/icons/statuses/open.png","name":"Open"}}}}],"description":"example bug report","project":{"self":"http://www.example.com/jira/rest/api/2/project/EX","id":"10000","key":"EX","name":"Example","avatarUrls":{"48x48":"http://www.example.com/jira/secure/projectavatar?size=large&pid=10000","24x24":"http://www.example.com/jira/secure/projectavatar?size=small&pid=10000","16x16":"http://www.example.com/jira/secure/projectavatar?size=xsmall&pid=10000","32x32":"http://www.example.com/jira/secure/projectavatar?size=medium&pid=10000"},"projectCategory":{"self":"http://www.example.com/jira/rest/api/2/projectCategory/10000","id":"10000","name":"FIRST","description":"First Project Category"}},"comment":{"comments":[{"self":"http://www.example.com/jira/rest/api/2/issue/10010/comment/10000","id":"10000","author":{"self":"http://www.example.com/jira/rest/api/2/user?username=fred","name":"fred","displayName":"Fred F. User","active":false},"body":"Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque eget venenatis elit. Duis eu justo eget augue iaculis fermentum. Sed semper quam laoreet nisi egestas at posuere augue semper.","updateAuthor":{"self":"http://www.example.com/jira/rest/api/2/user?username=fred","name":"fred","displayName":"Fred F. 
User","active":false},"created":"2016-03-16T04:22:37.356+0000","updated":"2016-03-16T04:22:37.356+0000","visibility":{"type":"role","value":"Administrators"}}]},"issuelinks":[{"id":"10001","type":{"id":"10000","name":"Dependent","inward":"depends on","outward":"is depended by"},"outwardIssue":{"id":"10004L","key":"PRJ-2","self":"http://www.example.com/jira/rest/api/2/issue/PRJ-2","fields":{"status":{"iconUrl":"http://www.example.com/jira//images/icons/statuses/open.png","name":"Open"}}}},{"id":"10002","type":{"id":"10000","name":"Dependent","inward":"depends on","outward":"is depended by"},"inwardIssue":{"id":"10004","key":"PRJ-3","self":"http://www.example.com/jira/rest/api/2/issue/PRJ-3","fields":{"status":{"iconUrl":"http://www.example.com/jira//images/icons/statuses/open.png","name":"Open"}}}}],"worklog":{"worklogs":[{"self":"http://www.example.com/jira/rest/api/2/issue/10010/worklog/10000","author":{"self":"http://www.example.com/jira/rest/api/2/user?username=fred","name":"fred","displayName":"Fred F. User","active":false},"updateAuthor":{"self":"http://www.example.com/jira/rest/api/2/user?username=fred","name":"fred","displayName":"Fred F. User","active":false},"comment":"I did some work here.","updated":"2016-03-16T04:22:37.471+0000","visibility":{"type":"group","value":"jira-developers"},"started":"2016-03-16T04:22:37.471+0000","timeSpent":"3h 20m","timeSpentSeconds":12000,"id":"100028","issueId":"10002"}]},"updated":"2016-04-06T02:36:53.594-0700","timetracking":{"originalEstimate":"10m","remainingEstimate":"3m","timeSpent":"6m","originalEstimateSeconds":600,"remainingEstimateSeconds":200,"timeSpentSeconds":400}},"names":{"watcher":"watcher","attachment":"attachment","sub-tasks":"sub-tasks","description":"description","project":"project","comment":"comment","issuelinks":"issuelinks","worklog":"worklog","updated":"updated","timetracking":"timetracking"},"schema":{}}`)
- })
-
- i := &Issue{
- Fields: &IssueFields{
- Description: "example bug report",
- },
- }
- issue, _, err := testClient.Issue.Create(i)
- if issue == nil {
- t.Error("Expected issue. Issue is nil")
- }
- if err != nil {
- t.Errorf("Error given: %s", err)
- }
-}
-
-func TestIssueService_AddComment(t *testing.T) {
- setup()
- defer teardown()
- testMux.HandleFunc("/rest/api/2/issue/10000/comment", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "POST")
- testRequestURL(t, r, "/rest/api/2/issue/10000/comment")
-
- w.WriteHeader(http.StatusCreated)
- fmt.Fprint(w, `{"self":"http://www.example.com/jira/rest/api/2/issue/10010/comment/10000","id":"10000","author":{"self":"http://www.example.com/jira/rest/api/2/user?username=fred","name":"fred","displayName":"Fred F. User","active":false},"body":"Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque eget venenatis elit. Duis eu justo eget augue iaculis fermentum. Sed semper quam laoreet nisi egestas at posuere augue semper.","updateAuthor":{"self":"http://www.example.com/jira/rest/api/2/user?username=fred","name":"fred","displayName":"Fred F. User","active":false},"created":"2016-03-16T04:22:37.356+0000","updated":"2016-03-16T04:22:37.356+0000","visibility":{"type":"role","value":"Administrators"}}`)
- })
-
- c := &Comment{
- Body: "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque eget venenatis elit. Duis eu justo eget augue iaculis fermentum. Sed semper quam laoreet nisi egestas at posuere augue semper.",
- Visibility: CommentVisibility{
- Type: "role",
- Value: "Administrators",
- },
- }
- comment, _, err := testClient.Issue.AddComment("10000", c)
- if comment == nil {
- t.Error("Expected Comment. Comment is nil")
- }
- if err != nil {
- t.Errorf("Error given: %s", err)
- }
-}
-
-func TestIssueService_AddLink(t *testing.T) {
- setup()
- defer teardown()
- testMux.HandleFunc("/rest/api/2/issueLink", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "POST")
- testRequestURL(t, r, "/rest/api/2/issueLink")
-
- w.WriteHeader(http.StatusOK)
- })
-
- il := &IssueLink{
- Type: IssueLinkType{
- Name: "Duplicate",
- },
- InwardIssue: &Issue{
- Key: "HSP-1",
- },
- OutwardIssue: &Issue{
- Key: "MKY-1",
- },
- Comment: &Comment{
- Body: "Linked related issue!",
- Visibility: CommentVisibility{
- Type: "group",
- Value: "jira-software-users",
- },
- },
- }
- resp, err := testClient.Issue.AddLink(il)
- if resp == nil {
- t.Error("Expected response. Response is nil")
- }
- if resp.StatusCode != 200 {
- t.Errorf("Expected Status code 200. Given %d", resp.StatusCode)
- }
- if err != nil {
- t.Errorf("Error given: %s", err)
- }
-}
-
-func TestIssueService_Get_Fields(t *testing.T) {
- setup()
- defer teardown()
- testMux.HandleFunc("/rest/api/2/issue/10002", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testRequestURL(t, r, "/rest/api/2/issue/10002")
-
- fmt.Fprint(w, `{"expand":"renderedFields,names,schema,transitions,operations,editmeta,changelog,versionedRepresentations","id":"10002","self":"http://www.example.com/jira/rest/api/2/issue/10002","key":"EX-1","fields":{"labels":["test"],"watcher":{"self":"http://www.example.com/jira/rest/api/2/issue/EX-1/watchers","isWatching":false,"watchCount":1,"watchers":[{"self":"http://www.example.com/jira/rest/api/2/user?username=fred","name":"fred","displayName":"Fred F. User","active":false}]},"epic": {"id": 19415,"key": "EPIC-77","self": "https://example.atlassian.net/rest/agile/1.0/epic/19415","name": "Epic Name","summary": "Do it","color": {"key": "color_11"},"done": false},"attachment":[{"self":"http://www.example.com/jira/rest/api/2.0/attachments/10000","filename":"picture.jpg","author":{"self":"http://www.example.com/jira/rest/api/2/user?username=fred","name":"fred","avatarUrls":{"48x48":"http://www.example.com/jira/secure/useravatar?size=large&ownerId=fred","24x24":"http://www.example.com/jira/secure/useravatar?size=small&ownerId=fred","16x16":"http://www.example.com/jira/secure/useravatar?size=xsmall&ownerId=fred","32x32":"http://www.example.com/jira/secure/useravatar?size=medium&ownerId=fred"},"displayName":"Fred F. 
User","active":false},"created":"2016-03-16T04:22:37.461+0000","size":23123,"mimeType":"image/jpeg","content":"http://www.example.com/jira/attachments/10000","thumbnail":"http://www.example.com/jira/secure/thumbnail/10000"}],"sub-tasks":[{"id":"10000","type":{"id":"10000","name":"","inward":"Parent","outward":"Sub-task"},"outwardIssue":{"id":"10003","key":"EX-2","self":"http://www.example.com/jira/rest/api/2/issue/EX-2","fields":{"status":{"iconUrl":"http://www.example.com/jira//images/icons/statuses/open.png","name":"Open"}}}}],"description":"example bug report","project":{"self":"http://www.example.com/jira/rest/api/2/project/EX","id":"10000","key":"EX","name":"Example","avatarUrls":{"48x48":"http://www.example.com/jira/secure/projectavatar?size=large&pid=10000","24x24":"http://www.example.com/jira/secure/projectavatar?size=small&pid=10000","16x16":"http://www.example.com/jira/secure/projectavatar?size=xsmall&pid=10000","32x32":"http://www.example.com/jira/secure/projectavatar?size=medium&pid=10000"},"projectCategory":{"self":"http://www.example.com/jira/rest/api/2/projectCategory/10000","id":"10000","name":"FIRST","description":"First Project Category"}},"comment":{"comments":[{"self":"http://www.example.com/jira/rest/api/2/issue/10010/comment/10000","id":"10000","author":{"self":"http://www.example.com/jira/rest/api/2/user?username=fred","name":"fred","displayName":"Fred F. User","active":false},"body":"Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque eget venenatis elit. Duis eu justo eget augue iaculis fermentum. Sed semper quam laoreet nisi egestas at posuere augue semper.","updateAuthor":{"self":"http://www.example.com/jira/rest/api/2/user?username=fred","name":"fred","displayName":"Fred F. 
User","active":false},"created":"2016-03-16T04:22:37.356+0000","updated":"2016-03-16T04:22:37.356+0000","visibility":{"type":"role","value":"Administrators"}}]},"issuelinks":[{"id":"10001","type":{"id":"10000","name":"Dependent","inward":"depends on","outward":"is depended by"},"outwardIssue":{"id":"10004L","key":"PRJ-2","self":"http://www.example.com/jira/rest/api/2/issue/PRJ-2","fields":{"status":{"iconUrl":"http://www.example.com/jira//images/icons/statuses/open.png","name":"Open"}}}},{"id":"10002","type":{"id":"10000","name":"Dependent","inward":"depends on","outward":"is depended by"},"inwardIssue":{"id":"10004","key":"PRJ-3","self":"http://www.example.com/jira/rest/api/2/issue/PRJ-3","fields":{"status":{"iconUrl":"http://www.example.com/jira//images/icons/statuses/open.png","name":"Open"}}}}],"worklog":{"worklogs":[{"self":"http://www.example.com/jira/rest/api/2/issue/10010/worklog/10000","author":{"self":"http://www.example.com/jira/rest/api/2/user?username=fred","name":"fred","displayName":"Fred F. User","active":false},"updateAuthor":{"self":"http://www.example.com/jira/rest/api/2/user?username=fred","name":"fred","displayName":"Fred F. User","active":false},"comment":"I did some work here.","updated":"2016-03-16T04:22:37.471+0000","visibility":{"type":"group","value":"jira-developers"},"started":"2016-03-16T04:22:37.471+0000","timeSpent":"3h 20m","timeSpentSeconds":12000,"id":"100028","issueId":"10002"}]},"updated":"2016-04-06T02:36:53.594-0700","timetracking":{"originalEstimate":"10m","remainingEstimate":"3m","timeSpent":"6m","originalEstimateSeconds":600,"remainingEstimateSeconds":200,"timeSpentSeconds":400}},"names":{"watcher":"watcher","attachment":"attachment","sub-tasks":"sub-tasks","description":"description","project":"project","comment":"comment","issuelinks":"issuelinks","worklog":"worklog","updated":"updated","timetracking":"timetracking"},"schema":{}}`)
- })
-
- issue, _, err := testClient.Issue.Get("10002")
- if issue == nil {
- t.Error("Expected issue. Issue is nil")
- }
- if !reflect.DeepEqual(issue.Fields.Labels, []string{"test"}) {
- t.Error("Expected labels for the returned issue")
- }
-
- if len(issue.Fields.Comments.Comments) != 1 {
- t.Errorf("Expected one comment, %v found", len(issue.Fields.Comments.Comments))
- }
- if issue.Fields.Epic == nil {
- t.Error("Epic expected but not found")
- }
-
- if err != nil {
- t.Errorf("Error given: %s", err)
- }
-}
-
-func TestIssueService_DownloadAttachment(t *testing.T) {
- var testAttachment = "Here is an attachment"
-
- setup()
- defer teardown()
- testMux.HandleFunc("/secure/attachment/", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testRequestURL(t, r, "/secure/attachment/10000/")
-
- w.WriteHeader(http.StatusOK)
- w.Write([]byte(testAttachment))
- })
-
- resp, err := testClient.Issue.DownloadAttachment("10000")
- if resp == nil {
- t.Error("Expected response. Response is nil")
- }
- defer resp.Body.Close()
-
- attachment, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- t.Error("Expected attachment text", err)
- }
- if string(attachment) != testAttachment {
- t.Errorf("Expecting an attachment: %s", string(attachment))
- }
-
- if resp.StatusCode != 200 {
- t.Errorf("Expected Status code 200. Given %d", resp.StatusCode)
- }
- if err != nil {
- t.Errorf("Error given: %s", err)
- }
-}
-
-func TestIssueService_DownloadAttachment_BadStatus(t *testing.T) {
-
- setup()
- defer teardown()
- testMux.HandleFunc("/secure/attachment/", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testRequestURL(t, r, "/secure/attachment/10000/")
-
- w.WriteHeader(http.StatusForbidden)
- })
-
- resp, err := testClient.Issue.DownloadAttachment("10000")
- if resp == nil {
- t.Error("Expected response. Response is nil")
- }
- defer resp.Body.Close()
-
- if resp.StatusCode != http.StatusForbidden {
- t.Errorf("Expected Status code %d. Given %d", http.StatusForbidden, resp.StatusCode)
- }
- if err == nil {
- t.Errorf("Error expected")
- }
-}
-
-func TestIssueService_PostAttachment(t *testing.T) {
- var testAttachment = "Here is an attachment"
-
- setup()
- defer teardown()
- testMux.HandleFunc("/rest/api/2/issue/10000/attachments", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "POST")
- testRequestURL(t, r, "/rest/api/2/issue/10000/attachments")
- status := http.StatusOK
-
- file, _, err := r.FormFile("file")
- if err != nil {
- status = http.StatusNotAcceptable
- }
- if file == nil {
- status = http.StatusNoContent
- } else {
-
- // Read the file into memory
- data, err := ioutil.ReadAll(file)
- if err != nil {
- status = http.StatusInternalServerError
- }
- if string(data) != testAttachment {
- status = http.StatusNotAcceptable
- }
-
- w.WriteHeader(status)
- fmt.Fprint(w, `[{"self":"http://jira/jira/rest/api/2/attachment/228924","id":"228924","filename":"example.jpg","author":{"self":"http://jira/jira/rest/api/2/user?username=test","name":"test","emailAddress":"test@test.com","avatarUrls":{"16x16":"http://jira/jira/secure/useravatar?size=small&avatarId=10082","48x48":"http://jira/jira/secure/useravatar?avatarId=10082"},"displayName":"Tester","active":true},"created":"2016-05-24T00:25:17.000-0700","size":32280,"mimeType":"image/jpeg","content":"http://jira/jira/secure/attachment/228924/example.jpg","thumbnail":"http://jira/jira/secure/thumbnail/228924/_thumb_228924.png"}]`)
- file.Close()
- }
- })
-
- reader := strings.NewReader(testAttachment)
-
- issue, resp, err := testClient.Issue.PostAttachment("10000", reader, "attachment")
-
- if issue == nil {
- t.Error("Expected response. Response is nil")
- }
-
- if resp.StatusCode != 200 {
- t.Errorf("Expected Status code 200. Given %d", resp.StatusCode)
- }
-
- if err != nil {
- t.Errorf("Error given: %s", err)
- }
-}
-
-func TestIssueService_PostAttachment_NoResponse(t *testing.T) {
- var testAttachment = "Here is an attachment"
-
- setup()
- defer teardown()
- testMux.HandleFunc("/rest/api/2/issue/10000/attachments", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "POST")
- testRequestURL(t, r, "/rest/api/2/issue/10000/attachments")
- w.WriteHeader(http.StatusOK)
- })
- reader := strings.NewReader(testAttachment)
-
- _, _, err := testClient.Issue.PostAttachment("10000", reader, "attachment")
-
- if err == nil {
- t.Errorf("Error expected: %s", err)
- }
-}
-
-func TestIssueService_PostAttachment_NoFilename(t *testing.T) {
- var testAttachment = "Here is an attachment"
-
- setup()
- defer teardown()
- testMux.HandleFunc("/rest/api/2/issue/10000/attachments", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "POST")
- testRequestURL(t, r, "/rest/api/2/issue/10000/attachments")
- w.WriteHeader(http.StatusOK)
- fmt.Fprint(w, `[{"self":"http://jira/jira/rest/api/2/attachment/228924","id":"228924","filename":"example.jpg","author":{"self":"http://jira/jira/rest/api/2/user?username=test","name":"test","emailAddress":"test@test.com","avatarUrls":{"16x16":"http://jira/jira/secure/useravatar?size=small&avatarId=10082","48x48":"http://jira/jira/secure/useravatar?avatarId=10082"},"displayName":"Tester","active":true},"created":"2016-05-24T00:25:17.000-0700","size":32280,"mimeType":"image/jpeg","content":"http://jira/jira/secure/attachment/228924/example.jpg","thumbnail":"http://jira/jira/secure/thumbnail/228924/_thumb_228924.png"}]`)
- })
- reader := strings.NewReader(testAttachment)
-
- _, _, err := testClient.Issue.PostAttachment("10000", reader, "")
-
- if err != nil {
- t.Errorf("Error expected: %s", err)
- }
-}
-
-func TestIssueService_PostAttachment_NoAttachment(t *testing.T) {
- setup()
- defer teardown()
- testMux.HandleFunc("/rest/api/2/issue/10000/attachments", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "POST")
- testRequestURL(t, r, "/rest/api/2/issue/10000/attachments")
- w.WriteHeader(http.StatusOK)
- fmt.Fprint(w, `[{"self":"http://jira/jira/rest/api/2/attachment/228924","id":"228924","filename":"example.jpg","author":{"self":"http://jira/jira/rest/api/2/user?username=test","name":"test","emailAddress":"test@test.com","avatarUrls":{"16x16":"http://jira/jira/secure/useravatar?size=small&avatarId=10082","48x48":"http://jira/jira/secure/useravatar?avatarId=10082"},"displayName":"Tester","active":true},"created":"2016-05-24T00:25:17.000-0700","size":32280,"mimeType":"image/jpeg","content":"http://jira/jira/secure/attachment/228924/example.jpg","thumbnail":"http://jira/jira/secure/thumbnail/228924/_thumb_228924.png"}]`)
- })
-
- _, _, err := testClient.Issue.PostAttachment("10000", nil, "attachment")
-
- if err != nil {
- t.Errorf("Error given: %s", err)
- }
-}
-
-func TestIssueService_Search(t *testing.T) {
- setup()
- defer teardown()
- testMux.HandleFunc("/rest/api/2/search", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testRequestURL(t, r, "/rest/api/2/search?jql=something&startAt=1&maxResults=40")
- w.WriteHeader(http.StatusOK)
- fmt.Fprint(w, `{"expand": "schema,names","startAt": 1,"maxResults": 40,"total": 6,"issues": [{"expand": "html","id": "10230","self": "http://kelpie9:8081/rest/api/2/issue/BULK-62","key": "BULK-62","fields": {"summary": "testing","timetracking": null,"issuetype": {"self": "http://kelpie9:8081/rest/api/2/issuetype/5","id": "5","description": "The sub-task of the issue","iconUrl": "http://kelpie9:8081/images/icons/issue_subtask.gif","name": "Sub-task","subtask": true},"customfield_10071": null}},{"expand": "html","id": "10004","self": "http://kelpie9:8081/rest/api/2/issue/BULK-47","key": "BULK-47","fields": {"summary": "Cheese v1 2.0 issue","timetracking": null,"issuetype": {"self": "http://kelpie9:8081/rest/api/2/issuetype/3","id": "3","description": "A task that needs to be done.","iconUrl": "http://kelpie9:8081/images/icons/task.gif","name": "Task","subtask": false}}}]}`)
- })
-
- opt := &SearchOptions{StartAt: 1, MaxResults: 40}
- _, resp, err := testClient.Issue.Search("something", opt)
-
- if resp == nil {
- t.Errorf("Response given: %+v", resp)
- }
- if err != nil {
- t.Errorf("Error given: %s", err)
- }
-
- if resp.StartAt != 1 {
- t.Errorf("StartAt should populate with 1, %v given", resp.StartAt)
- }
- if resp.MaxResults != 40 {
- t.Errorf("StartAt should populate with 40, %v given", resp.MaxResults)
- }
- if resp.Total != 6 {
- t.Errorf("StartAt should populate with 6, %v given", resp.Total)
- }
-}
-
-func TestIssueService_Search_WithoutPaging(t *testing.T) {
- setup()
- defer teardown()
- testMux.HandleFunc("/rest/api/2/search", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testRequestURL(t, r, "/rest/api/2/search?jql=something")
- w.WriteHeader(http.StatusOK)
- fmt.Fprint(w, `{"expand": "schema,names","startAt": 0,"maxResults": 50,"total": 6,"issues": [{"expand": "html","id": "10230","self": "http://kelpie9:8081/rest/api/2/issue/BULK-62","key": "BULK-62","fields": {"summary": "testing","timetracking": null,"issuetype": {"self": "http://kelpie9:8081/rest/api/2/issuetype/5","id": "5","description": "The sub-task of the issue","iconUrl": "http://kelpie9:8081/images/icons/issue_subtask.gif","name": "Sub-task","subtask": true},"customfield_10071": null}},{"expand": "html","id": "10004","self": "http://kelpie9:8081/rest/api/2/issue/BULK-47","key": "BULK-47","fields": {"summary": "Cheese v1 2.0 issue","timetracking": null,"issuetype": {"self": "http://kelpie9:8081/rest/api/2/issuetype/3","id": "3","description": "A task that needs to be done.","iconUrl": "http://kelpie9:8081/images/icons/task.gif","name": "Task","subtask": false}}}]}`)
- })
-
- _, resp, err := testClient.Issue.Search("something", nil)
-
- if resp == nil {
- t.Errorf("Response given: %+v", resp)
- }
- if err != nil {
- t.Errorf("Error given: %s", err)
- }
-
- if resp.StartAt != 0 {
- t.Errorf("StartAt should populate with 0, %v given", resp.StartAt)
- }
- if resp.MaxResults != 50 {
- t.Errorf("StartAt should populate with 50, %v given", resp.MaxResults)
- }
- if resp.Total != 6 {
- t.Errorf("StartAt should populate with 6, %v given", resp.Total)
- }
-}
-
-func TestIssueService_GetCustomFields(t *testing.T) {
- setup()
- defer teardown()
- testMux.HandleFunc("/rest/api/2/issue/10002", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testRequestURL(t, r, "/rest/api/2/issue/10002")
- fmt.Fprint(w, `{"expand":"renderedFields,names,schema,transitions,operations,editmeta,changelog,versionedRepresentations","id":"10002","self":"http://www.example.com/jira/rest/api/2/issue/10002","key":"EX-1","fields":{"customfield_123":"test","watcher":{"self":"http://www.example.com/jira/rest/api/2/issue/EX-1/watchers","isWatching":false,"watchCount":1,"watchers":[{"self":"http://www.example.com/jira/rest/api/2/user?username=fred","name":"fred","displayName":"Fred F. User","active":false}]},"attachment":[{"self":"http://www.example.com/jira/rest/api/2.0/attachments/10000","filename":"picture.jpg","author":{"self":"http://www.example.com/jira/rest/api/2/user?username=fred","name":"fred","avatarUrls":{"48x48":"http://www.example.com/jira/secure/useravatar?size=large&ownerId=fred","24x24":"http://www.example.com/jira/secure/useravatar?size=small&ownerId=fred","16x16":"http://www.example.com/jira/secure/useravatar?size=xsmall&ownerId=fred","32x32":"http://www.example.com/jira/secure/useravatar?size=medium&ownerId=fred"},"displayName":"Fred F. 
User","active":false},"created":"2016-03-16T04:22:37.461+0000","size":23123,"mimeType":"image/jpeg","content":"http://www.example.com/jira/attachments/10000","thumbnail":"http://www.example.com/jira/secure/thumbnail/10000"}],"sub-tasks":[{"id":"10000","type":{"id":"10000","name":"","inward":"Parent","outward":"Sub-task"},"outwardIssue":{"id":"10003","key":"EX-2","self":"http://www.example.com/jira/rest/api/2/issue/EX-2","fields":{"status":{"iconUrl":"http://www.example.com/jira//images/icons/statuses/open.png","name":"Open"}}}}],"description":"example bug report","project":{"self":"http://www.example.com/jira/rest/api/2/project/EX","id":"10000","key":"EX","name":"Example","avatarUrls":{"48x48":"http://www.example.com/jira/secure/projectavatar?size=large&pid=10000","24x24":"http://www.example.com/jira/secure/projectavatar?size=small&pid=10000","16x16":"http://www.example.com/jira/secure/projectavatar?size=xsmall&pid=10000","32x32":"http://www.example.com/jira/secure/projectavatar?size=medium&pid=10000"},"projectCategory":{"self":"http://www.example.com/jira/rest/api/2/projectCategory/10000","id":"10000","name":"FIRST","description":"First Project Category"}},"comment":{"comments":[{"self":"http://www.example.com/jira/rest/api/2/issue/10010/comment/10000","id":"10000","author":{"self":"http://www.example.com/jira/rest/api/2/user?username=fred","name":"fred","displayName":"Fred F. User","active":false},"body":"Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque eget venenatis elit. Duis eu justo eget augue iaculis fermentum. Sed semper quam laoreet nisi egestas at posuere augue semper.","updateAuthor":{"self":"http://www.example.com/jira/rest/api/2/user?username=fred","name":"fred","displayName":"Fred F. 
User","active":false},"created":"2016-03-16T04:22:37.356+0000","updated":"2016-03-16T04:22:37.356+0000","visibility":{"type":"role","value":"Administrators"}}]},"issuelinks":[{"id":"10001","type":{"id":"10000","name":"Dependent","inward":"depends on","outward":"is depended by"},"outwardIssue":{"id":"10004L","key":"PRJ-2","self":"http://www.example.com/jira/rest/api/2/issue/PRJ-2","fields":{"status":{"iconUrl":"http://www.example.com/jira//images/icons/statuses/open.png","name":"Open"}}}},{"id":"10002","type":{"id":"10000","name":"Dependent","inward":"depends on","outward":"is depended by"},"inwardIssue":{"id":"10004","key":"PRJ-3","self":"http://www.example.com/jira/rest/api/2/issue/PRJ-3","fields":{"status":{"iconUrl":"http://www.example.com/jira//images/icons/statuses/open.png","name":"Open"}}}}],"worklog":{"worklogs":[{"self":"http://www.example.com/jira/rest/api/2/issue/10010/worklog/10000","author":{"self":"http://www.example.com/jira/rest/api/2/user?username=fred","name":"fred","displayName":"Fred F. User","active":false},"updateAuthor":{"self":"http://www.example.com/jira/rest/api/2/user?username=fred","name":"fred","displayName":"Fred F. User","active":false},"comment":"I did some work here.","updated":"2016-03-16T04:22:37.471+0000","visibility":{"type":"group","value":"jira-developers"},"started":"2016-03-16T04:22:37.471+0000","timeSpent":"3h 20m","timeSpentSeconds":12000,"id":"100028","issueId":"10002"}]},"updated":"2016-04-06T02:36:53.594-0700","timetracking":{"originalEstimate":"10m","remainingEstimate":"3m","timeSpent":"6m","originalEstimateSeconds":600,"remainingEstimateSeconds":200,"timeSpentSeconds":400}},"names":{"watcher":"watcher","attachment":"attachment","sub-tasks":"sub-tasks","description":"description","project":"project","comment":"comment","issuelinks":"issuelinks","worklog":"worklog","updated":"updated","timetracking":"timetracking"},"schema":{}}`)
- })
-
- issue, _, err := testClient.Issue.GetCustomFields("10002")
- if err != nil {
- t.Errorf("Error given: %s", err)
- }
- if issue == nil {
- t.Error("Expected Customfields")
- }
- cf := issue["customfield_123"]
- if cf != "test" {
- t.Error("Expected \"test\" for custom field")
- }
-}
-
-func TestIssueService_GetTransitions(t *testing.T) {
- setup()
- defer teardown()
-
- testAPIEndpoint := "/rest/api/2/issue/123/transitions"
-
- raw, err := ioutil.ReadFile("./mocks/transitions.json")
- if err != nil {
- t.Error(err.Error())
- }
-
- testMux.HandleFunc(testAPIEndpoint, func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testRequestURL(t, r, testAPIEndpoint)
- fmt.Fprint(w, string(raw))
- })
-
- transitions, _, err := testClient.Issue.GetTransitions("123")
-
- if err != nil {
- t.Errorf("Got error: %v", err)
- }
-
- if transitions == nil {
- t.Error("Expected transition list. Got nil.")
- }
-
- if len(transitions) != 2 {
- t.Errorf("Expected 2 transitions. Got %d", len(transitions))
- }
-
- if transitions[0].Fields["summary"].Required != false {
- t.Errorf("First transition summary field should not be required")
- }
-}
-
-func TestIssueService_DoTransition(t *testing.T) {
- setup()
- defer teardown()
-
- testAPIEndpoint := "/rest/api/2/issue/123/transitions"
-
- transitionID := "22"
-
- testMux.HandleFunc(testAPIEndpoint, func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "POST")
- testRequestURL(t, r, testAPIEndpoint)
-
- decoder := json.NewDecoder(r.Body)
- var payload CreateTransitionPayload
- err := decoder.Decode(&payload)
- if err != nil {
- t.Errorf("Got error: %v", err)
- }
-
- if payload.Transition.ID != transitionID {
- t.Errorf("Expected %s to be in payload, got %s instead", transitionID, payload.Transition.ID)
- }
- })
- _, err := testClient.Issue.DoTransition("123", transitionID)
-
- if err != nil {
- t.Errorf("Got error: %v", err)
- }
-}
diff --git a/vendor/src/github.com/andygrunwald/go-jira/jira.go b/vendor/src/github.com/andygrunwald/go-jira/jira.go
deleted file mode 100644
index 70b7e6e..0000000
--- a/vendor/src/github.com/andygrunwald/go-jira/jira.go
+++ /dev/null
@@ -1,224 +0,0 @@
-package jira
-
-import (
- "bytes"
- "encoding/json"
- "fmt"
- "io"
- "net/http"
- "net/url"
- "reflect"
-
- "github.com/google/go-querystring/query"
-)
-
-// A Client manages communication with the JIRA API.
-type Client struct {
- // HTTP client used to communicate with the API.
- client *http.Client
-
- // Base URL for API requests.
- baseURL *url.URL
-
- // Session storage if the user authentificate with a Session cookie
- session *Session
-
- // Services used for talking to different parts of the JIRA API.
- Authentication *AuthenticationService
- Issue *IssueService
- Project *ProjectService
- Board *BoardService
- Sprint *SprintService
-}
-
-// NewClient returns a new JIRA API client.
-// If a nil httpClient is provided, http.DefaultClient will be used.
-// To use API methods which require authentication you can follow the preferred solution and
-// provide an http.Client that will perform the authentication for you with OAuth and HTTP Basic (such as that provided by the golang.org/x/oauth2 library).
-// As an alternative you can use Session Cookie based authentication provided by this package as well.
-// See https://docs.atlassian.com/jira/REST/latest/#authentication
-// baseURL is the HTTP endpoint of your JIRA instance and should always be specified with a trailing slash.
-func NewClient(httpClient *http.Client, baseURL string) (*Client, error) {
- if httpClient == nil {
- httpClient = http.DefaultClient
- }
-
- parsedBaseURL, err := url.Parse(baseURL)
- if err != nil {
- return nil, err
- }
-
- c := &Client{
- client: httpClient,
- baseURL: parsedBaseURL,
- }
- c.Authentication = &AuthenticationService{client: c}
- c.Issue = &IssueService{client: c}
- c.Project = &ProjectService{client: c}
- c.Board = &BoardService{client: c}
- c.Sprint = &SprintService{client: c}
-
- return c, nil
-}
-
-// NewRequest creates an API request.
-// A relative URL can be provided in urlStr, in which case it is resolved relative to the baseURL of the Client.
-// Relative URLs should always be specified without a preceding slash.
-// If specified, the value pointed to by body is JSON encoded and included as the request body.
-func (c *Client) NewRequest(method, urlStr string, body interface{}) (*http.Request, error) {
- rel, err := url.Parse(urlStr)
- if err != nil {
- return nil, err
- }
-
- u := c.baseURL.ResolveReference(rel)
-
- var buf io.ReadWriter
- if body != nil {
- buf = new(bytes.Buffer)
- err := json.NewEncoder(buf).Encode(body)
- if err != nil {
- return nil, err
- }
- }
-
- req, err := http.NewRequest(method, u.String(), buf)
- if err != nil {
- return nil, err
- }
-
- req.Header.Set("Content-Type", "application/json")
-
- // Set session cookie if there is one
- if c.session != nil {
- for _, cookie := range c.session.Cookies {
- req.AddCookie(cookie)
- }
- }
-
- return req, nil
-}
-
-// addOptions adds the parameters in opt as URL query parameters to s. opt
-// must be a struct whose fields may contain "url" tags.
-func addOptions(s string, opt interface{}) (string, error) {
- v := reflect.ValueOf(opt)
- if v.Kind() == reflect.Ptr && v.IsNil() {
- return s, nil
- }
-
- u, err := url.Parse(s)
- if err != nil {
- return s, err
- }
-
- qs, err := query.Values(opt)
- if err != nil {
- return s, err
- }
-
- u.RawQuery = qs.Encode()
- return u.String(), nil
-}
-
-// NewMultiPartRequest creates an API request including a multi-part file.
-// A relative URL can be provided in urlStr, in which case it is resolved relative to the baseURL of the Client.
-// Relative URLs should always be specified without a preceding slash.
-// If specified, the value pointed to by buf is a multipart form.
-func (c *Client) NewMultiPartRequest(method, urlStr string, buf *bytes.Buffer) (*http.Request, error) {
- rel, err := url.Parse(urlStr)
- if err != nil {
- return nil, err
- }
-
- u := c.baseURL.ResolveReference(rel)
-
- req, err := http.NewRequest(method, u.String(), buf)
- if err != nil {
- return nil, err
- }
-
- // Set required headers
- req.Header.Set("X-Atlassian-Token", "nocheck")
-
- // Set session cookie if there is one
- if c.session != nil {
- for _, cookie := range c.session.Cookies {
- req.AddCookie(cookie)
- }
- }
-
- return req, nil
-}
-
-// Do sends an API request and returns the API response.
-// The API response is JSON decoded and stored in the value pointed to by v, or returned as an error if an API error has occurred.
-func (c *Client) Do(req *http.Request, v interface{}) (*Response, error) {
- httpResp, err := c.client.Do(req)
- if err != nil {
- return nil, err
- }
-
- err = CheckResponse(httpResp)
- if err != nil {
- // Even though there was an error, we still return the response
- // in case the caller wants to inspect it further
- return newResponse(httpResp, nil), err
- }
-
- if v != nil {
- // Open a NewDecoder and defer closing the reader only if there is a provided interface to decode to
- defer httpResp.Body.Close()
- err = json.NewDecoder(httpResp.Body).Decode(v)
- }
-
- resp := newResponse(httpResp, v)
- return resp, err
-}
-
-// CheckResponse checks the API response for errors, and returns them if present.
-// A response is considered an error if it has a status code outside the 200 range.
-// The caller is responsible to analyze the response body.
-// The body can contain JSON (if the error is intended) or xml (sometimes JIRA just failes).
-func CheckResponse(r *http.Response) error {
- if c := r.StatusCode; 200 <= c && c <= 299 {
- return nil
- }
-
- err := fmt.Errorf("Request failed. Please analyze the request body for more details. Status code: %d", r.StatusCode)
- return err
-}
-
-// GetBaseURL will return you the Base URL.
-// This is the same URL as in the NewClient constructor
-func (c *Client) GetBaseURL() url.URL {
- return *c.baseURL
-}
-
-// Response represents JIRA API response. It wraps http.Response returned from
-// API and provides information about paging.
-type Response struct {
- *http.Response
-
- StartAt int
- MaxResults int
- Total int
-}
-
-func newResponse(r *http.Response, v interface{}) *Response {
- resp := &Response{Response: r}
- resp.populatePageValues(v)
- return resp
-}
-
-// Sets paging values if response json was parsed to searchResult type
-// (can be extended with other types if they also need paging info)
-func (r *Response) populatePageValues(v interface{}) {
- switch value := v.(type) {
- case *searchResult:
- r.StartAt = value.StartAt
- r.MaxResults = value.MaxResults
- r.Total = value.Total
- }
- return
-}
diff --git a/vendor/src/github.com/andygrunwald/go-jira/jira_test.go b/vendor/src/github.com/andygrunwald/go-jira/jira_test.go
deleted file mode 100644
index 7e6de1d..0000000
--- a/vendor/src/github.com/andygrunwald/go-jira/jira_test.go
+++ /dev/null
@@ -1,390 +0,0 @@
-package jira
-
-import (
- "bytes"
- "encoding/json"
- "fmt"
- "io/ioutil"
- "net/http"
- "net/http/httptest"
- "net/url"
- "reflect"
- "strings"
- "testing"
- "time"
-)
-
-const (
- testJIRAInstanceURL = "https://issues.apache.org/jira/"
-)
-
-var (
- // testMux is the HTTP request multiplexer used with the test server.
- testMux *http.ServeMux
-
- // testClient is the JIRA client being tested.
- testClient *Client
-
- // testServer is a test HTTP server used to provide mock API responses.
- testServer *httptest.Server
-)
-
-type testValues map[string]string
-
-// setup sets up a test HTTP server along with a jira.Client that is configured to talk to that test server.
-// Tests should register handlers on mux which provide mock responses for the API method being tested.
-func setup() {
- // Test server
- testMux = http.NewServeMux()
- testServer = httptest.NewServer(testMux)
-
- // jira client configured to use test server
- testClient, _ = NewClient(nil, testServer.URL)
-}
-
-// teardown closes the test HTTP server.
-func teardown() {
- testServer.Close()
-}
-
-func testMethod(t *testing.T, r *http.Request, want string) {
- if got := r.Method; got != want {
- t.Errorf("Request method: %v, want %v", got, want)
- }
-}
-
-func testRequestURL(t *testing.T, r *http.Request, want string) {
- if got := r.URL.String(); !strings.HasPrefix(got, want) {
- t.Errorf("Request URL: %v, want %v", got, want)
- }
-}
-
-func TestNewClient_WrongUrl(t *testing.T) {
- c, err := NewClient(nil, "://issues.apache.org/jira/")
-
- if err == nil {
- t.Error("Expected an error. Got none")
- }
- if c != nil {
- t.Errorf("Expected no client. Got %+v", c)
- }
-}
-
-func TestNewClient_WithHttpClient(t *testing.T) {
- httpClient := http.DefaultClient
- httpClient.Timeout = 10 * time.Minute
- c, err := NewClient(httpClient, testJIRAInstanceURL)
-
- if err != nil {
- t.Errorf("Got an error: %s", err)
- }
- if c == nil {
- t.Error("Expected a client. Got none")
- }
- if !reflect.DeepEqual(c.client, httpClient) {
- t.Errorf("HTTP clients are not equal. Injected %+v, got %+v", httpClient, c.client)
- }
-}
-
-func TestNewClient_WithServices(t *testing.T) {
- c, err := NewClient(nil, testJIRAInstanceURL)
-
- if err != nil {
- t.Errorf("Got an error: %s", err)
- }
- if c.Authentication == nil {
- t.Error("No AuthenticationService provided")
- }
- if c.Issue == nil {
- t.Error("No IssueService provided")
- }
- if c.Project == nil {
- t.Error("No ProjectService provided")
- }
- if c.Board == nil {
- t.Error("No BoardService provided")
- }
- if c.Sprint == nil {
- t.Error("No SprintService provided")
- }
-}
-
-func TestCheckResponse(t *testing.T) {
- codes := []int{
- http.StatusOK, http.StatusPartialContent, 299,
- }
-
- for _, c := range codes {
- r := &http.Response{
- StatusCode: c,
- }
- if err := CheckResponse(r); err != nil {
- t.Errorf("CheckResponse throws an error: %s", err)
- }
- }
-}
-
-func TestClient_NewRequest(t *testing.T) {
- c, err := NewClient(nil, testJIRAInstanceURL)
- if err != nil {
- t.Errorf("An error occured. Expected nil. Got %+v.", err)
- }
-
- inURL, outURL := "rest/api/2/issue/", testJIRAInstanceURL+"rest/api/2/issue/"
- inBody, outBody := &Issue{Key: "MESOS"}, `{"key":"MESOS"}`+"\n"
- req, _ := c.NewRequest("GET", inURL, inBody)
-
- // Test that relative URL was expanded
- if got, want := req.URL.String(), outURL; got != want {
- t.Errorf("NewRequest(%q) URL is %v, want %v", inURL, got, want)
- }
-
- // Test that body was JSON encoded
- body, _ := ioutil.ReadAll(req.Body)
- if got, want := string(body), outBody; got != want {
- t.Errorf("NewRequest(%v) Body is %v, want %v", inBody, got, want)
- }
-}
-
-func TestClient_NewRequest_InvalidJSON(t *testing.T) {
- c, err := NewClient(nil, testJIRAInstanceURL)
- if err != nil {
- t.Errorf("An error occured. Expected nil. Got %+v.", err)
- }
-
- type T struct {
- A map[int]interface{}
- }
- _, err = c.NewRequest("GET", "/", &T{})
-
- if err == nil {
- t.Error("Expected error to be returned.")
- }
- if err, ok := err.(*json.UnsupportedTypeError); !ok {
- t.Errorf("Expected a JSON error; got %+v.", err)
- }
-}
-
-func testURLParseError(t *testing.T, err error) {
- if err == nil {
- t.Errorf("Expected error to be returned")
- }
- if err, ok := err.(*url.Error); !ok || err.Op != "parse" {
- t.Errorf("Expected URL parse error, got %+v", err)
- }
-}
-
-func TestClient_NewRequest_BadURL(t *testing.T) {
- c, err := NewClient(nil, testJIRAInstanceURL)
- if err != nil {
- t.Errorf("An error occured. Expected nil. Got %+v.", err)
- }
- _, err = c.NewRequest("GET", ":", nil)
- testURLParseError(t, err)
-}
-
-func TestClient_NewRequest_SessionCookies(t *testing.T) {
- c, err := NewClient(nil, testJIRAInstanceURL)
- if err != nil {
- t.Errorf("An error occured. Expected nil. Got %+v.", err)
- }
-
- cookie := &http.Cookie{Name: "testcookie", Value: "testvalue"}
- c.session = &Session{Cookies: []*http.Cookie{cookie}}
-
- inURL := "rest/api/2/issue/"
- inBody := &Issue{Key: "MESOS"}
- req, err := c.NewRequest("GET", inURL, inBody)
-
- if err != nil {
- t.Errorf("An error occured. Expected nil. Got %+v.", err)
- }
-
- if len(req.Cookies()) != len(c.session.Cookies) {
- t.Errorf("An error occured. Expected %d cookie(s). Got %d.", len(c.session.Cookies), len(req.Cookies()))
- }
-
- for i, v := range req.Cookies() {
- if v.String() != c.session.Cookies[i].String() {
- t.Errorf("An error occured. Unexpected cookie. Expected %s, actual %s.", v.String(), c.session.Cookies[i].String())
- }
- }
-}
-
-// If a nil body is passed to gerrit.NewRequest, make sure that nil is also passed to http.NewRequest.
-// In most cases, passing an io.Reader that returns no content is fine,
-// since there is no difference between an HTTP request body that is an empty string versus one that is not set at all.
-// However in certain cases, intermediate systems may treat these differently resulting in subtle errors.
-func TestClient_NewRequest_EmptyBody(t *testing.T) {
- c, err := NewClient(nil, testJIRAInstanceURL)
- if err != nil {
- t.Errorf("An error occured. Expected nil. Got %+v.", err)
- }
- req, err := c.NewRequest("GET", "/", nil)
- if err != nil {
- t.Fatalf("NewRequest returned unexpected error: %v", err)
- }
- if req.Body != nil {
- t.Fatalf("constructed request contains a non-nil Body")
- }
-}
-
-func TestClient_NewMultiPartRequest(t *testing.T) {
- c, err := NewClient(nil, testJIRAInstanceURL)
- if err != nil {
- t.Errorf("An error occured. Expected nil. Got %+v.", err)
- }
-
- cookie := &http.Cookie{Name: "testcookie", Value: "testvalue"}
- c.session = &Session{Cookies: []*http.Cookie{cookie}}
-
- inURL := "rest/api/2/issue/"
- inBuf := bytes.NewBufferString("teststring")
- req, err := c.NewMultiPartRequest("GET", inURL, inBuf)
-
- if err != nil {
- t.Errorf("An error occured. Expected nil. Got %+v.", err)
- }
-
- if len(req.Cookies()) != len(c.session.Cookies) {
- t.Errorf("An error occured. Expected %d cookie(s). Got %d.", len(c.session.Cookies), len(req.Cookies()))
- }
-
- for i, v := range req.Cookies() {
- if v.String() != c.session.Cookies[i].String() {
- t.Errorf("An error occured. Unexpected cookie. Expected %s, actual %s.", v.String(), c.session.Cookies[i].String())
- }
- }
-
- if req.Header.Get("X-Atlassian-Token") != "nocheck" {
- t.Errorf("An error occured. Unexpected X-Atlassian-Token header value. Expected nocheck, actual %s.", req.Header.Get("X-Atlassian-Token"))
- }
-}
-
-func TestClient_Do(t *testing.T) {
- setup()
- defer teardown()
-
- type foo struct {
- A string
- }
-
- testMux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
- if m := "GET"; m != r.Method {
- t.Errorf("Request method = %v, want %v", r.Method, m)
- }
- fmt.Fprint(w, `{"A":"a"}`)
- })
-
- req, _ := testClient.NewRequest("GET", "/", nil)
- body := new(foo)
- testClient.Do(req, body)
-
- want := &foo{"a"}
- if !reflect.DeepEqual(body, want) {
- t.Errorf("Response body = %v, want %v", body, want)
- }
-}
-
-func TestClient_Do_HTTPResponse(t *testing.T) {
- setup()
- defer teardown()
-
- type foo struct {
- A string
- }
-
- testMux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
- if m := "GET"; m != r.Method {
- t.Errorf("Request method = %v, want %v", r.Method, m)
- }
- fmt.Fprint(w, `{"A":"a"}`)
- })
-
- req, _ := testClient.NewRequest("GET", "/", nil)
- res, _ := testClient.Do(req, nil)
- _, err := ioutil.ReadAll(res.Body)
-
- if err != nil {
- t.Errorf("Error on parsing HTTP Response = %v", err.Error())
- } else if res.StatusCode != 200 {
- t.Errorf("Response code = %v, want %v", res.StatusCode, 200)
- }
-}
-
-func TestClient_Do_HTTPError(t *testing.T) {
- setup()
- defer teardown()
-
- testMux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
- http.Error(w, "Bad Request", 400)
- })
-
- req, _ := testClient.NewRequest("GET", "/", nil)
- _, err := testClient.Do(req, nil)
-
- if err == nil {
- t.Error("Expected HTTP 400 error.")
- }
-}
-
-// Test handling of an error caused by the internal http client's Do() function.
-// A redirect loop is pretty unlikely to occur within the Gerrit API, but does allow us to exercise the right code path.
-func TestClient_Do_RedirectLoop(t *testing.T) {
- setup()
- defer teardown()
-
- testMux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
- http.Redirect(w, r, "/", http.StatusFound)
- })
-
- req, _ := testClient.NewRequest("GET", "/", nil)
- _, err := testClient.Do(req, nil)
-
- if err == nil {
- t.Error("Expected error to be returned.")
- }
- if err, ok := err.(*url.Error); !ok {
- t.Errorf("Expected a URL error; got %+v.", err)
- }
-}
-
-func TestClient_GetBaseURL_WithURL(t *testing.T) {
- u, err := url.Parse(testJIRAInstanceURL)
- if err != nil {
- t.Errorf("URL parsing -> Got an error: %s", err)
- }
-
- c, err := NewClient(nil, testJIRAInstanceURL)
- if err != nil {
- t.Errorf("Client creation -> Got an error: %s", err)
- }
- if c == nil {
- t.Error("Expected a client. Got none")
- }
-
- if b := c.GetBaseURL(); !reflect.DeepEqual(b, *u) {
- t.Errorf("Base URLs are not equal. Expected %+v, got %+v", *u, b)
- }
-}
-
-func TestClient_Do_PagingInfoEmptyByDefault(t *testing.T) {
- c, _ := NewClient(nil, testJIRAInstanceURL)
- req, _ := c.NewRequest("GET", "/", nil)
- type foo struct {
- A string
- }
- body := new(foo)
-
- resp, _ := c.Do(req, body)
-
- if resp.StartAt != 0 {
- t.Errorf("StartAt not equal to 0")
- }
- if resp.MaxResults != 0 {
- t.Errorf("StartAt not equal to 0")
- }
- if resp.Total != 0 {
- t.Errorf("StartAt not equal to 0")
- }
-}
diff --git a/vendor/src/github.com/andygrunwald/go-jira/mocks/all_boards.json b/vendor/src/github.com/andygrunwald/go-jira/mocks/all_boards.json
deleted file mode 100644
index 2065cb6..0000000
--- a/vendor/src/github.com/andygrunwald/go-jira/mocks/all_boards.json
+++ /dev/null
@@ -1,43 +0,0 @@
-{
- "maxResults": 50,
- "startAt": 0,
- "isLast": true,
- "values": [
- {
- "id": 4,
- "self": "https://test.jira.org/rest/agile/1.0/board/4",
- "name": "Test Weekly",
- "type": "scrum"
- },
- {
- "id": 5,
- "self": "https://test.jira.org/rest/agile/1.0/board/5",
- "name": "Test Production Support",
- "type": "kanban"
- },
- {
- "id": 6,
- "self": "https://test.jira.org/rest/agile/1.0/board/6",
- "name": "Test To Give",
- "type": "kanban"
- },
- {
- "id": 7,
- "self": "https://test.jira.org/rest/agile/1.0/board/7",
- "name": "Test Journey App",
- "type": "kanban"
- },
- {
- "id": 9,
- "self": "https://test.jira.org/rest/agile/1.0/board/9",
- "name": "Testix",
- "type": "scrum"
- },
- {
- "id": 1,
- "self": "https://test.jira.org/rest/agile/1.0/board/1",
- "name": "Test Mobile",
- "type": "scrum"
- }
- ]
-}
\ No newline at end of file
diff --git a/vendor/src/github.com/andygrunwald/go-jira/mocks/all_boards_filtered.json b/vendor/src/github.com/andygrunwald/go-jira/mocks/all_boards_filtered.json
deleted file mode 100644
index 545f8a8..0000000
--- a/vendor/src/github.com/andygrunwald/go-jira/mocks/all_boards_filtered.json
+++ /dev/null
@@ -1,25 +0,0 @@
-{
- "maxResults": 10,
- "startAt": 1,
- "isLast": true,
- "values": [
- {
- "id": 4,
- "self": "https://test.jira.org/rest/agile/1.0/board/4",
- "name": "Test Weekly",
- "type": "scrum"
- },
- {
- "id": 9,
- "self": "https://test.jira.org/rest/agile/1.0/board/9",
- "name": "Testix",
- "type": "scrum"
- },
- {
- "id": 1,
- "self": "https://test.jira.org/rest/agile/1.0/board/1",
- "name": "Test Mobile",
- "type": "scrum"
- }
- ]
-}
\ No newline at end of file
diff --git a/vendor/src/github.com/andygrunwald/go-jira/mocks/all_projects.json b/vendor/src/github.com/andygrunwald/go-jira/mocks/all_projects.json
deleted file mode 100644
index 32574c3..0000000
--- a/vendor/src/github.com/andygrunwald/go-jira/mocks/all_projects.json
+++ /dev/null
@@ -1,9872 +0,0 @@
-[
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10730",
- "id": "10730",
- "key": "AGILA",
- "name": " Agila",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10730&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10730&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10730&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10730&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10220",
- "id": "10220",
- "description": "Retired projects - These projects have no active community and are read-only.",
- "name": "Retired"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12320120",
- "id": "12320120",
- "key": "AAR",
- "name": "aardvark",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12320120&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12320120&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12320120&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12320120&avatarId=10011"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310505",
- "id": "12310505",
- "key": "ABDERA",
- "name": "Abdera",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310505&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310505&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310505&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310505&avatarId=10011"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12312121",
- "id": "12312121",
- "key": "ACCUMULO",
- "name": "Accumulo",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12312121&avatarId=16462",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12312121&avatarId=16462",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12312121&avatarId=16462",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12312121&avatarId=16462"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310931",
- "id": "12310931",
- "key": "ACE",
- "name": "ACE",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310931&avatarId=17543",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310931&avatarId=17543",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310931&avatarId=17543",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310931&avatarId=17543"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10760",
- "id": "10760",
- "description": "Apache Ace related",
- "name": "Ace"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311200",
- "id": "12311200",
- "key": "ACL",
- "name": "ActiveCluster",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311200&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311200&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311200&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311200&avatarId=10011"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311210",
- "id": "12311210",
- "key": "AMQ",
- "name": "ActiveMQ",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311210&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311210&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311210&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311210&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/11160",
- "id": "11160",
- "description": "ActiveMQ",
- "name": "ActiveMQ"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311201",
- "id": "12311201",
- "key": "AMQNET",
- "name": "ActiveMQ .Net",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311201&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311201&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311201&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311201&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/11160",
- "id": "11160",
- "description": "ActiveMQ",
- "name": "ActiveMQ"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311310",
- "id": "12311310",
- "key": "APLO",
- "name": "ActiveMQ Apollo",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311310&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311310&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311310&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311310&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/11160",
- "id": "11160",
- "description": "ActiveMQ",
- "name": "ActiveMQ"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12315920",
- "id": "12315920",
- "key": "ARTEMIS",
- "name": "ActiveMQ Artemis",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12315920&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12315920&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12315920&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12315920&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/11160",
- "id": "11160",
- "description": "ActiveMQ",
- "name": "ActiveMQ"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311207",
- "id": "12311207",
- "key": "AMQCPP",
- "name": "ActiveMQ C++ Client",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311207&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311207&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311207&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311207&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/11160",
- "id": "11160",
- "description": "ActiveMQ",
- "name": "ActiveMQ"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12315620",
- "id": "12315620",
- "key": "OPENWIRE",
- "name": "ActiveMQ OpenWire",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12315620&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12315620&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12315620&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12315620&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/11160",
- "id": "11160",
- "description": "ActiveMQ",
- "name": "ActiveMQ"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311204",
- "id": "12311204",
- "key": "BLAZE",
- "name": "ActiveRealTime",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311204&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311204&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311204&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311204&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/11160",
- "id": "11160",
- "description": "ActiveMQ",
- "name": "ActiveMQ"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310060",
- "id": "12310060",
- "key": "ADDR",
- "name": "Addressing",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310060&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310060&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310060&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310060&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10220",
- "id": "10220",
- "description": "Retired projects - These projects have no active community and are read-only.",
- "name": "Retired"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311302",
- "id": "12311302",
- "key": "AIRAVATA",
- "name": "Airavata",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311302&avatarId=12756",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311302&avatarId=12756",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311302&avatarId=12756",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311302&avatarId=12756"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311173",
- "id": "12311173",
- "key": "ALOIS",
- "name": "ALOIS",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311173&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311173&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311173&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311173&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10220",
- "id": "10220",
- "description": "Retired projects - These projects have no active community and are read-only.",
- "name": "Retired"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10101",
- "id": "10101",
- "key": "ARMI",
- "name": "AltRMI",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10101&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10101&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10101&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10101&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10220",
- "id": "10220",
- "description": "Retired projects - These projects have no active community and are read-only.",
- "name": "Retired"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12312020",
- "id": "12312020",
- "key": "AMBARI",
- "name": "Ambari",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12312020&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12312020&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12312020&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12312020&avatarId=10011"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311100",
- "id": "12311100",
- "key": "AMBER",
- "name": "Amber",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311100&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311100&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311100&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311100&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10024",
- "id": "10024",
- "description": "Incubator related projects",
- "name": "Incubator"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310632",
- "id": "12310632",
- "key": "ANAKIA",
- "name": "Anakia",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310632&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310632&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310632&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310632&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10200",
- "id": "10200",
- "description": "Apache Velocity related projects",
- "name": "Velocity"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12320023",
- "id": "12320023",
- "key": "AIRFLOW",
- "name": "Apache Airflow",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12320023&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12320023&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12320023&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12320023&avatarId=10011"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12312323",
- "id": "12312323",
- "key": "ANY23",
- "name": "Apache Any23",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12312323&avatarId=14539",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12312323&avatarId=14539",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12312323&avatarId=14539",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12312323&avatarId=14539"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/11560",
- "id": "11560",
- "description": "Any23 related",
- "name": "Any23"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12318823",
- "id": "12318823",
- "key": "APEXCORE",
- "name": "Apache Apex Core",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12318823&avatarId=25655",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12318823&avatarId=25655",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12318823&avatarId=25655",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12318823&avatarId=25655"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12318824",
- "id": "12318824",
- "key": "APEXMALHAR",
- "name": "Apache Apex Malhar",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12318824&avatarId=25834",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12318824&avatarId=25834",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12318824&avatarId=25834",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12318824&avatarId=25834"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12319525",
- "id": "12319525",
- "key": "ARROW",
- "name": "Apache Arrow",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12319525&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12319525&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12319525&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12319525&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/13960",
- "id": "13960",
- "description": "Apache Arrow",
- "name": "Arrow"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12316420",
- "id": "12316420",
- "key": "ASTERIXDB",
- "name": "Apache AsterixDB",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12316420&avatarId=24741",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12316420&avatarId=24741",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12316420&avatarId=24741",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12316420&avatarId=24741"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10024",
- "id": "10024",
- "description": "Incubator related projects",
- "name": "Incubator"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12313220",
- "id": "12313220",
- "key": "AWF",
- "name": "Apache AWF",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12313220&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12313220&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12313220&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12313220&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10024",
- "id": "10024",
- "description": "Incubator related projects",
- "name": "Incubator"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12313721",
- "id": "12313721",
- "key": "BLUR",
- "name": "Apache Blur",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12313721&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12313721&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12313721&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12313721&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10024",
- "id": "10024",
- "description": "Incubator related projects",
- "name": "Incubator"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12318520",
- "id": "12318520",
- "key": "CMDA",
- "name": "Apache Climate Model Diagnostic Analyzer",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12318520&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12318520&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12318520&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12318520&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10024",
- "id": "10024",
- "description": "Incubator related projects",
- "name": "Incubator"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12316620",
- "id": "12316620",
- "key": "COMMONSRDF",
- "name": "Apache Commons RDF",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12316620&avatarId=26863",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12316620&avatarId=26863",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12316620&avatarId=26863",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12316620&avatarId=26863"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12319120",
- "id": "12319120",
- "key": "CONCERTED",
- "name": "Apache Concerted",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12319120&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12319120&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12319120&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12319120&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10220",
- "id": "10220",
- "description": "Retired projects - These projects have no active community and are read-only.",
- "name": "Retired"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12312420",
- "id": "12312420",
- "key": "CB",
- "name": "Apache Cordova",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12312420&avatarId=15888",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12312420&avatarId=15888",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12312420&avatarId=15888",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12312420&avatarId=15888"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/11963",
- "id": "11963",
- "description": "Apache Cordova related projects",
- "name": "Cordova"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12314425",
- "id": "12314425",
- "key": "CURATOR",
- "name": "Apache Curator",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12314425&avatarId=16745",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12314425&avatarId=16745",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12314425&avatarId=16745",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12314425&avatarId=16745"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12312321",
- "id": "12312321",
- "key": "DIRECTMEMORY",
- "name": "Apache DirectMemory",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12312321&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12312321&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12312321&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12312321&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10024",
- "id": "10024",
- "description": "Incubator related projects",
- "name": "Incubator"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12313820",
- "id": "12313820",
- "key": "DRILL",
- "name": "Apache Drill",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12313820&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12313820&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12313820&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12313820&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10024",
- "id": "10024",
- "description": "Incubator related projects",
- "name": "Incubator"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12319420",
- "id": "12319420",
- "key": "FINERACT",
- "name": "Apache Fineract",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12319420&avatarId=25736",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12319420&avatarId=25736",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12319420&avatarId=25736",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12319420&avatarId=25736"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10024",
- "id": "10024",
- "description": "Incubator related projects",
- "name": "Incubator"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12313521",
- "id": "12313521",
- "key": "FLEX",
- "name": "Apache Flex",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12313521&avatarId=16182",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12313521&avatarId=16182",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12313521&avatarId=16182",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12313521&avatarId=16182"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12318825",
- "id": "12318825",
- "key": "FREEMARKER",
- "name": "Apache Freemarker",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12318825&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12318825&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12318825&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12318825&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10024",
- "id": "10024",
- "description": "Incubator related projects",
- "name": "Incubator"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12319920",
- "id": "12319920",
- "key": "GEARPUMP",
- "name": "Apache Gearpump",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12319920&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12319920&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12319920&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12319920&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10024",
- "id": "10024",
- "description": "Incubator related projects",
- "name": "Incubator"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311172",
- "id": "12311172",
- "key": "GORA",
- "name": "Apache Gora",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311172&avatarId=10423",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311172&avatarId=10423",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311172&avatarId=10423",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311172&avatarId=10423"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/11964",
- "id": "11964",
- "description": "Apache Gora related projects",
- "name": "Gora"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12318826",
- "id": "12318826",
- "key": "HAWQ",
- "name": "Apache HAWQ",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12318826&avatarId=25002",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12318826&avatarId=25002",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12318826&avatarId=25002",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12318826&avatarId=25002"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10024",
- "id": "10024",
- "description": "Incubator related projects",
- "name": "Incubator"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12314020",
- "id": "12314020",
- "key": "HELIX",
- "name": "Apache Helix",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12314020&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12314020&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12314020&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12314020&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/12360",
- "id": "12360",
- "description": "Apache Helix projects",
- "name": "Helix"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12318827",
- "id": "12318827",
- "key": "HORN",
- "name": "Apache Horn",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12318827&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12318827&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12318827&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12318827&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10024",
- "id": "10024",
- "description": "Incubator related projects",
- "name": "Incubator"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311220",
- "id": "12311220",
- "key": "JENA",
- "name": "Apache Jena",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311220&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311220&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311220&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311220&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/11061",
- "id": "11061",
- "description": "Apache Jena related projects",
- "name": "Jena"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12314427",
- "id": "12314427",
- "key": "KNOX",
- "name": "Apache Knox",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12314427&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12314427&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12314427&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12314427&avatarId=10011"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12315923",
- "id": "12315923",
- "key": "LENS",
- "name": "Apache Lens",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12315923&avatarId=22172",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12315923&avatarId=22172",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12315923&avatarId=22172",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12315923&avatarId=22172"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/13562",
- "id": "13562",
- "description": "Apache Lens",
- "name": "Lens"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12315720",
- "id": "12315720",
- "key": "CLOWNFISH",
- "name": "Apache Lucy-Clownfish",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12315720&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12315720&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12315720&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12315720&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/14061",
- "id": "14061",
- "description": "Apache Lucy Related Projects",
- "name": "Lucy"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12318923",
- "id": "12318923",
- "key": "MADLIB",
- "name": "Apache MADlib",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12318923&avatarId=25311",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12318923&avatarId=25311",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12318923&avatarId=25311",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12318923&avatarId=25311"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10024",
- "id": "10024",
- "description": "Incubator related projects",
- "name": "Incubator"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12314621",
- "id": "12314621",
- "key": "MASFRES",
- "name": "Apache Maven Resource Bundles",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12314621&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12314621&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12314621&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12314621&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10510",
- "id": "10510",
- "description": "Apache Maven related projects",
- "name": "Maven"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12314523",
- "id": "12314523",
- "key": "METAMODEL",
- "name": "Apache MetaModel",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12314523&avatarId=22051",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12314523&avatarId=22051",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12314523&avatarId=22051",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12314523&avatarId=22051"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12316020",
- "id": "12316020",
- "key": "NIFI",
- "name": "Apache NiFi",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12316020&avatarId=22284",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12316020&avatarId=22284",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12316020&avatarId=22284",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12316020&avatarId=22284"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/13460",
- "id": "13460",
- "description": "Apache NiFi",
- "name": "NiFi"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12319921",
- "id": "12319921",
- "key": "MINIFI",
- "name": "Apache NiFi MiNiFi",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12319921&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12319921&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12319921&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12319921&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/13460",
- "id": "13460",
- "description": "Apache NiFi",
- "name": "NiFi"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12314420",
- "id": "12314420",
- "key": "OLTU",
- "name": "Apache Oltu",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12314420&avatarId=18031",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12314420&avatarId=18031",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12314420&avatarId=18031",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12314420&avatarId=18031"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12320021",
- "id": "12320021",
- "key": "OMID",
- "name": "Apache Omid",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12320021&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12320021&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12320021&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12320021&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10024",
- "id": "10024",
- "description": "Incubator related projects",
- "name": "Incubator"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12314120",
- "id": "12314120",
- "key": "ONAMI",
- "name": "Apache Onami",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12314120&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12314120&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12314120&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12314120&avatarId=10011"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12314422",
- "id": "12314422",
- "key": "CLIMATE",
- "name": "Apache Open Climate Workbench",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12314422&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12314422&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12314422&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12314422&avatarId=10011"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12318220",
- "id": "12318220",
- "key": "OPENAZ",
- "name": "Apache OpenAZ",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12318220&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12318220&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12318220&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12318220&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10024",
- "id": "10024",
- "description": "Incubator related projects",
- "name": "Incubator"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12318621",
- "id": "12318621",
- "key": "QPIDIT",
- "name": "Apache QPID IT",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12318621&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12318621&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12318621&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12318621&avatarId=10011"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12320022",
- "id": "12320022",
- "key": "QUICKSTEP",
- "name": "Apache Quickstep",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12320022&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12320022&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12320022&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12320022&avatarId=10011"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310750",
- "id": "12310750",
- "key": "RAT",
- "name": "Apache Rat",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310750&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310750&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310750&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310750&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/11060",
- "id": "11060",
- "description": "Comprehend and audit software distributions",
- "name": "Creadur"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12314421",
- "id": "12314421",
- "key": "RIPPLE",
- "name": "Apache Ripple",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12314421&avatarId=16737",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12314421&avatarId=16737",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12314421&avatarId=16737",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12314421&avatarId=16737"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10220",
- "id": "10220",
- "description": "Retired projects - These projects have no active community and are read-only.",
- "name": "Retired"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310906",
- "id": "12310906",
- "key": "ROL",
- "name": "Apache Roller",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310906&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310906&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310906&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310906&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10331",
- "id": "10331",
- "description": "",
- "name": "Roller"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12312322",
- "id": "12312322",
- "key": "S4",
- "name": "Apache S4",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12312322&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12312322&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12312322&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12312322&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10024",
- "id": "10024",
- "description": "Incubator related projects",
- "name": "Incubator"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12314820",
- "id": "12314820",
- "key": "STORM",
- "name": "Apache Storm",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12314820&avatarId=21667",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12314820&avatarId=21667",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12314820&avatarId=21667",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12314820&avatarId=21667"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/13260",
- "id": "13260",
- "description": "Apache Storm Related",
- "name": "Storm"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12318322",
- "id": "12318322",
- "key": "TAVERNA",
- "name": "Apache Taverna",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12318322&avatarId=26751",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12318322&avatarId=26751",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12318322&avatarId=26751",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12318322&avatarId=26751"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12313620",
- "id": "12313620",
- "key": "TENTACLES",
- "name": "Apache Tentacles",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12313620&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12313620&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12313620&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12313620&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/11060",
- "id": "11060",
- "description": "Comprehend and audit software distributions",
- "name": "Creadur"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12314426",
- "id": "12314426",
- "key": "TEZ",
- "name": "Apache Tez",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12314426&avatarId=20336",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12314426&avatarId=20336",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12314426&avatarId=20336",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12314426&avatarId=20336"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/12761",
- "id": "12761",
- "description": "Apache Tez related ",
- "name": "Tez"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12312120",
- "id": "12312120",
- "key": "MTOMCAT",
- "name": "Apache Tomcat Maven Plugin",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12312120&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12312120&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12312120&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12312120&avatarId=10011"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12318620",
- "id": "12318620",
- "key": "TRAFODION",
- "name": "Apache Trafodion",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12318620&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12318620&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12318620&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12318620&avatarId=10011"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12315020",
- "id": "12315020",
- "key": "TWILL",
- "name": "Apache Twill",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12315020&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12315020&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12315020&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12315020&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10024",
- "id": "10024",
- "description": "Incubator related projects",
- "name": "Incubator"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12319220",
- "id": "12319220",
- "key": "UNOMI",
- "name": "Apache Unomi",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12319220&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12319220&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12319220&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12319220&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10024",
- "id": "10024",
- "description": "Incubator related projects",
- "name": "Incubator"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311110",
- "id": "12311110",
- "key": "WHIRR",
- "name": "Apache Whirr (retired)",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311110&avatarId=10381",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311110&avatarId=10381",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311110&avatarId=10381",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311110&avatarId=10381"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10220",
- "id": "10220",
- "description": "Retired projects - These projects have no active community and are read-only.",
- "name": "Retired"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12313020",
- "id": "12313020",
- "key": "WHISKER",
- "name": "Apache Whisker",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12313020&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12313020&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12313020&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12313020&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/11060",
- "id": "11060",
- "description": "Comprehend and audit software distributions",
- "name": "Creadur"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311090",
- "id": "12311090",
- "key": "APACHECON",
- "name": "Apachecon",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311090&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311090&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311090&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311090&avatarId=10011"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12316920",
- "id": "12316920",
- "key": "MRM",
- "name": "Archiva",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12316920&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12316920&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12316920&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12316920&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/13564",
- "id": "13564",
- "description": "Apache Archiva Project",
- "name": "Archiva"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310981",
- "id": "12310981",
- "key": "ARIES",
- "name": "Aries",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310981&avatarId=10065",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310981&avatarId=10065",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310981&avatarId=10065",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310981&avatarId=10065"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10520",
- "id": "10520",
- "description": "Apache Aries",
- "name": "Aries"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310752",
- "id": "12310752",
- "key": "ASYNCWEB",
- "name": "Asyncweb",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310752&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310752&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310752&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310752&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10240",
- "id": "10240",
- "description": "MINA related projects",
- "name": "MINA"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12318521",
- "id": "12318521",
- "key": "ATLAS",
- "name": "Atlas",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12318521&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12318521&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12318521&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12318521&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10024",
- "id": "10024",
- "description": "Incubator related projects",
- "name": "Incubator"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310920",
- "id": "12310920",
- "key": "ATTIC",
- "name": "Attic",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310920&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310920&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310920&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310920&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10130",
- "id": "10130",
- "description": "General ASF-wide projects.",
- "name": "Apache Software Foundation"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12314922",
- "id": "12314922",
- "key": "AURORA",
- "name": "Aurora",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12314922&avatarId=19299",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12314922&avatarId=19299",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12314922&avatarId=19299",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12314922&avatarId=19299"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/13160",
- "id": "13160",
- "description": "Apache Aurora",
- "name": "Aurora"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10572",
- "id": "10572",
- "key": "AVALON",
- "name": "Avalon",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10572&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10572&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10572&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10572&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10220",
- "id": "10220",
- "description": "Retired projects - These projects have no active community and are read-only.",
- "name": "Retired"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10426",
- "id": "10426",
- "key": "AVNSHARP",
- "name": "Avalon Castle",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10426&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10426&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10426&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10426&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10220",
- "id": "10220",
- "description": "Retired projects - These projects have no active community and are read-only.",
- "name": "Retired"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10425",
- "id": "10425",
- "key": "RUNTIME",
- "name": "Avalon Merlin Runtime",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10425&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10425&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10425&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10425&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10220",
- "id": "10220",
- "description": "Retired projects - These projects have no active community and are read-only.",
- "name": "Retired"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10531",
- "id": "10531",
- "key": "STUDIO",
- "name": "Avalon Merlin Studio",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10531&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10531&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10531&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10531&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10220",
- "id": "10220",
- "description": "Retired projects - These projects have no active community and are read-only.",
- "name": "Retired"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10530",
- "id": "10530",
- "key": "CENTRAL",
- "name": "Avalon Metro Central",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10530&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10530&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10530&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10530&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10220",
- "id": "10220",
- "description": "Retired projects - These projects have no active community and are read-only.",
- "name": "Retired"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10532",
- "id": "10532",
- "key": "PLANET",
- "name": "Avalon Metro Planet",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10532&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10532&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10532&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10532&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10220",
- "id": "10220",
- "description": "Retired projects - These projects have no active community and are read-only.",
- "name": "Retired"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10533",
- "id": "10533",
- "key": "TOOLS",
- "name": "Avalon Metro Tools",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10533&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10533&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10533&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10533&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10220",
- "id": "10220",
- "description": "Retired projects - These projects have no active community and are read-only.",
- "name": "Retired"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10081",
- "id": "10081",
- "key": "PNIX",
- "name": "Avalon Phoenix",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10081&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10081&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10081&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10081&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10220",
- "id": "10220",
- "description": "Retired projects - These projects have no active community and are read-only.",
- "name": "Retired"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310911",
- "id": "12310911",
- "key": "AVRO",
- "name": "Avro",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310911&avatarId=12750",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310911&avatarId=12750",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310911&avatarId=12750",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310911&avatarId=12750"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10420",
- "id": "10420",
- "description": "a serialization system",
- "name": "Avro"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311190",
- "id": "12311190",
- "key": "AXIOM",
- "name": "Axiom",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311190&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311190&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311190&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311190&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10028",
- "id": "10028",
- "description": "Web services projects",
- "name": "Web Services"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10460",
- "id": "10460",
- "key": "AXIS",
- "name": "Axis",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10460&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10460&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10460&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10460&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10401",
- "id": "10401",
- "description": "Axis and Axis2 related projects",
- "name": "Axis"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10461",
- "id": "10461",
- "key": "AXISCPP",
- "name": "Axis-C++",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10461&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10461&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10461&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10461&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10401",
- "id": "10401",
- "description": "Axis and Axis2 related projects",
- "name": "Axis"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10462",
- "id": "10462",
- "key": "WSIF",
- "name": "Axis-WSIF",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10462&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10462&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10462&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10462&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10220",
- "id": "10220",
- "description": "Retired projects - These projects have no active community and are read-only.",
- "name": "Retired"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10611",
- "id": "10611",
- "key": "AXIS2",
- "name": "Axis2",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10611&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10611&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10611&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10611&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10401",
- "id": "10401",
- "description": "Axis and Axis2 related projects",
- "name": "Axis"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311160",
- "id": "12311160",
- "key": "TRANSPORTS",
- "name": "Axis2 Transports",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311160&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311160&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311160&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311160&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10401",
- "id": "10401",
- "description": "Axis and Axis2 related projects",
- "name": "Axis"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310180",
- "id": "12310180",
- "key": "AXIS2C",
- "name": "Axis2-C",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310180&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310180&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310180&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310180&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10401",
- "id": "10401",
- "description": "Axis and Axis2 related projects",
- "name": "Axis"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12320122",
- "id": "12320122",
- "key": "BAHIR",
- "name": "Bahir",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12320122&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12320122&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12320122&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12320122&avatarId=10011"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12314924",
- "id": "12314924",
- "key": "BATCHEE",
- "name": "BatchEE",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12314924&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12314924&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12314924&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12314924&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10024",
- "id": "10024",
- "description": "Incubator related projects",
- "name": "Incubator"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12314222",
- "id": "12314222",
- "key": "BATIK",
- "name": "Batik",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12314222&avatarId=21762",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12314222&avatarId=21762",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12314222&avatarId=21762",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12314222&avatarId=21762"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12319527",
- "id": "12319527",
- "key": "BEAM",
- "name": "Beam",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12319527&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12319527&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12319527&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12319527&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10024",
- "id": "10024",
- "description": "Incubator related projects",
- "name": "Incubator"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10570",
- "id": "10570",
- "key": "BEEHIVE",
- "name": "Beehive",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10570&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10570&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10570&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10570&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10220",
- "id": "10220",
- "description": "Retired projects - These projects have no active community and are read-only.",
- "name": "Retired"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311420",
- "id": "12311420",
- "key": "BIGTOP",
- "name": "Bigtop",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311420&avatarId=11135",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311420&avatarId=11135",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311420&avatarId=11135",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311420&avatarId=11135"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/11660",
- "id": "11660",
- "description": "BigTop related",
- "name": "BigTop"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310907",
- "id": "12310907",
- "key": "BLUESKY",
- "name": "Bluesky",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310907&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310907&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310907&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310907&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10220",
- "id": "10220",
- "description": "Retired projects - These projects have no active community and are read-only.",
- "name": "Retired"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311293",
- "id": "12311293",
- "key": "BOOKKEEPER",
- "name": "Bookkeeper",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311293&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311293&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311293&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311293&avatarId=10011"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12312920",
- "id": "12312920",
- "key": "TM",
- "name": "BRAND",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12312920&avatarId=10009",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12312920&avatarId=10009",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12312920&avatarId=10009",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12312920&avatarId=10009"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12315421",
- "id": "12315421",
- "key": "BROOKLYN",
- "name": "Brooklyn",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12315421&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12315421&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12315421&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12315421&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/13861",
- "id": "13861",
- "description": "Apache Brooklyn project",
- "name": "Brooklyn"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310734",
- "id": "12310734",
- "key": "BUILDR",
- "name": "Buildr",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310734&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310734&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310734&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310734&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10350",
- "id": "10350",
- "description": "Apache Buildr related projects",
- "name": "Buildr"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311080",
- "id": "12311080",
- "key": "BVAL",
- "name": "BVal",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311080&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311080&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311080&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311080&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10860",
- "id": "10860",
- "description": "BVal related",
- "name": "BVal"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310070",
- "id": "12310070",
- "key": "STDCXX",
- "name": "C++ Standard Library",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310070&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310070&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310070&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310070&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10290",
- "id": "10290",
- "description": "C++ Standard Library projects",
- "name": "C++ Standard Library"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10471",
- "id": "10471",
- "key": "CACTUS",
- "name": "Cactus",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10471&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10471&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10471&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10471&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10220",
- "id": "10220",
- "description": "Retired projects - These projects have no active community and are read-only.",
- "name": "Retired"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12315524",
- "id": "12315524",
- "key": "CALCITE",
- "name": "Calcite",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12315524&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12315524&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12315524&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12315524&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/13760",
- "id": "13760",
- "description": "Apache Calcite",
- "name": "Calcite"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311211",
- "id": "12311211",
- "key": "CAMEL",
- "name": "Camel",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311211&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311211&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311211&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311211&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10482",
- "id": "10482",
- "description": "",
- "name": "Camel"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310865",
- "id": "12310865",
- "key": "CASSANDRA",
- "name": "Cassandra",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310865&avatarId=12034",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310865&avatarId=12034",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310865&avatarId=12034",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310865&avatarId=12034"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/11961",
- "id": "11961",
- "description": "Apache Cassandra related projects",
- "name": "Cassandra"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310903",
- "id": "12310903",
- "key": "CAY",
- "name": "Cayenne",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310903&avatarId=10010",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310903&avatarId=10010",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310903&avatarId=10010",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310903&avatarId=10010"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10330",
- "id": "10330",
- "description": "",
- "name": "Cayenne"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311213",
- "id": "12311213",
- "key": "CELIX",
- "name": "Celix",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311213&avatarId=22342",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311213&avatarId=22342",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311213&avatarId=22342",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311213&avatarId=22342"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10024",
- "id": "10024",
- "description": "Incubator related projects",
- "name": "Incubator"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310936",
- "id": "12310936",
- "key": "CMIS",
- "name": "Chemistry",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310936&avatarId=19264",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310936&avatarId=19264",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310936&avatarId=19264",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310936&avatarId=19264"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10540",
- "id": "10540",
- "description": "Apache Chemistry",
- "name": "Chemistry"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310900",
- "id": "12310900",
- "key": "CHUKWA",
- "name": "Chukwa",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310900&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310900&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310900&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310900&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10440",
- "id": "10440",
- "description": "Distributed log aggregation system",
- "name": "Chukwa"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311020",
- "id": "12311020",
- "key": "CLEREZZA",
- "name": "Clerezza",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311020&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311020&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311020&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311020&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10024",
- "id": "10024",
- "description": "Incubator related projects",
- "name": "Incubator"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310902",
- "id": "12310902",
- "key": "CLK",
- "name": "Click",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310902&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310902&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310902&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310902&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10220",
- "id": "10220",
- "description": "Retired projects - These projects have no active community and are read-only.",
- "name": "Retired"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310901",
- "id": "12310901",
- "key": "CLKE",
- "name": "Click Eclipse",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310901&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310901&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310901&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310901&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10390",
- "id": "10390",
- "description": "Click related projects",
- "name": "Click"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12313920",
- "id": "12313920",
- "key": "CLOUDSTACK",
- "name": "CloudStack",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12313920&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12313920&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12313920&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12313920&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/11969",
- "id": "11969",
- "description": "Apache Cloudstack related projects",
- "name": "Cloudstack"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310170",
- "id": "12310170",
- "key": "COCOON",
- "name": "Cocoon",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310170&avatarId=16588",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310170&avatarId=16588",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310170&avatarId=16588",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310170&avatarId=16588"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10160",
- "id": "10160",
- "description": "Apache Cocoon related projects",
- "name": "Cocoon"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310838",
- "id": "12310838",
- "key": "COCOON3",
- "name": "Cocoon 3",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310838&avatarId=16587",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310838&avatarId=16587",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310838&avatarId=16587",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310838&avatarId=16587"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10160",
- "id": "10160",
- "description": "Apache Cocoon related projects",
- "name": "Cocoon"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310466",
- "id": "12310466",
- "key": "COMMONSSITE",
- "name": "Commons All",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310466&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310466&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310466&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310466&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10260",
- "id": "10260",
- "description": "Apache Commons components",
- "name": "Commons"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310459",
- "id": "12310459",
- "key": "ATTRIBUTES",
- "name": "Commons Attributes",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310459&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310459&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310459&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310459&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10260",
- "id": "10260",
- "description": "Apache Commons components",
- "name": "Commons"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12314220",
- "id": "12314220",
- "key": "BCEL",
- "name": "Commons BCEL",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12314220&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12314220&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12314220&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12314220&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10260",
- "id": "10260",
- "description": "Apache Commons components",
- "name": "Commons"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310460",
- "id": "12310460",
- "key": "BEANUTILS",
- "name": "Commons BeanUtils",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310460&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310460&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310460&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310460&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10260",
- "id": "10260",
- "description": "Apache Commons components",
- "name": "Commons"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310461",
- "id": "12310461",
- "key": "BETWIXT",
- "name": "Commons Betwixt",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310461&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310461&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310461&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310461&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10260",
- "id": "10260",
- "description": "Apache Commons components",
- "name": "Commons"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310621",
- "id": "12310621",
- "key": "BSF",
- "name": "Commons BSF",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310621&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310621&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310621&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310621&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10260",
- "id": "10260",
- "description": "Apache Commons components",
- "name": "Commons"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310462",
- "id": "12310462",
- "key": "CHAIN",
- "name": "Commons Chain",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310462&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310462&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310462&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310462&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10260",
- "id": "10260",
- "description": "Apache Commons components",
- "name": "Commons"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310463",
- "id": "12310463",
- "key": "CLI",
- "name": "Commons CLI",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310463&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310463&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310463&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310463&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10260",
- "id": "10260",
- "description": "Apache Commons components",
- "name": "Commons"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310464",
- "id": "12310464",
- "key": "CODEC",
- "name": "Commons Codec",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310464&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310464&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310464&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310464&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10260",
- "id": "10260",
- "description": "Apache Commons components",
- "name": "Commons"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310465",
- "id": "12310465",
- "key": "COLLECTIONS",
- "name": "Commons Collections",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310465&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310465&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310465&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310465&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10260",
- "id": "10260",
- "description": "Apache Commons components",
- "name": "Commons"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310904",
- "id": "12310904",
- "key": "COMPRESS",
- "name": "Commons Compress",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310904&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310904&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310904&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310904&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10260",
- "id": "10260",
- "description": "Apache Commons components",
- "name": "Commons"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310467",
- "id": "12310467",
- "key": "CONFIGURATION",
- "name": "Commons Configuration",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310467&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310467&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310467&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310467&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10260",
- "id": "10260",
- "description": "Apache Commons components",
- "name": "Commons"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12320024",
- "id": "12320024",
- "key": "CRYPTO",
- "name": "Commons Crypto",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12320024&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12320024&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12320024&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12320024&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10260",
- "id": "10260",
- "description": "Apache Commons components",
- "name": "Commons"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12313222",
- "id": "12313222",
- "key": "CSV",
- "name": "Commons CSV",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12313222&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12313222&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12313222&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12313222&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10260",
- "id": "10260",
- "description": "Apache Commons components",
- "name": "Commons"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310468",
- "id": "12310468",
- "key": "DAEMON",
- "name": "Commons Daemon",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310468&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310468&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310468&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310468&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10260",
- "id": "10260",
- "description": "Apache Commons components",
- "name": "Commons"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310469",
- "id": "12310469",
- "key": "DBCP",
- "name": "Commons Dbcp",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310469&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310469&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310469&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310469&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10260",
- "id": "10260",
- "description": "Apache Commons components",
- "name": "Commons"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310470",
- "id": "12310470",
- "key": "DBUTILS",
- "name": "Commons DbUtils",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310470&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310470&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310470&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310470&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10260",
- "id": "10260",
- "description": "Apache Commons components",
- "name": "Commons"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310471",
- "id": "12310471",
- "key": "DIGESTER",
- "name": "Commons Digester",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310471&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310471&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310471&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310471&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10260",
- "id": "10260",
- "description": "Apache Commons components",
- "name": "Commons"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310472",
- "id": "12310472",
- "key": "DISCOVERY",
- "name": "Commons Discovery",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310472&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310472&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310472&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310472&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10260",
- "id": "10260",
- "description": "Apache Commons components",
- "name": "Commons"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310591",
- "id": "12310591",
- "key": "DORMANT",
- "name": "Commons Dormant",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310591&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310591&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310591&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310591&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10260",
- "id": "10260",
- "description": "Apache Commons components",
- "name": "Commons"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310473",
- "id": "12310473",
- "key": "EL",
- "name": "Commons EL",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310473&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310473&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310473&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310473&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10260",
- "id": "10260",
- "description": "Apache Commons components",
- "name": "Commons"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310474",
- "id": "12310474",
- "key": "EMAIL",
- "name": "Commons Email",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310474&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310474&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310474&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310474&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10260",
- "id": "10260",
- "description": "Apache Commons components",
- "name": "Commons"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310814",
- "id": "12310814",
- "key": "EXEC",
- "name": "Commons Exec",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310814&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310814&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310814&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310814&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10260",
- "id": "10260",
- "description": "Apache Commons components",
- "name": "Commons"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310475",
- "id": "12310475",
- "key": "FEEDPARSER",
- "name": "Commons FeedParser",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310475&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310475&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310475&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310475&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10220",
- "id": "10220",
- "description": "Retired projects - These projects have no active community and are read-only.",
- "name": "Retired"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310476",
- "id": "12310476",
- "key": "FILEUPLOAD",
- "name": "Commons FileUpload",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310476&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310476&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310476&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310476&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10260",
- "id": "10260",
- "description": "Apache Commons components",
- "name": "Commons"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12312520",
- "id": "12312520",
- "key": "FUNCTOR",
- "name": "Commons Functor",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12312520&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12312520&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12312520&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12312520&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10260",
- "id": "10260",
- "description": "Apache Commons components",
- "name": "Commons"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12313421",
- "id": "12313421",
- "key": "IMAGING",
- "name": "Commons Imaging",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12313421&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12313421&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12313421&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12313421&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10260",
- "id": "10260",
- "description": "Apache Commons components",
- "name": "Commons"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310477",
- "id": "12310477",
- "key": "IO",
- "name": "Commons IO",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310477&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310477&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310477&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310477&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10260",
- "id": "10260",
- "description": "Apache Commons components",
- "name": "Commons"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310650",
- "id": "12310650",
- "key": "JCI",
- "name": "Commons JCI",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310650&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310650&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310650&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310650&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10260",
- "id": "10260",
- "description": "Apache Commons components",
- "name": "Commons"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310506",
- "id": "12310506",
- "key": "JCS",
- "name": "Commons JCS",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310506&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310506&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310506&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310506&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10260",
- "id": "10260",
- "description": "Apache Commons components",
- "name": "Commons"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10012",
- "id": "10012",
- "key": "JELLY",
- "name": "Commons Jelly",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10012&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10012&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10012&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10012&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10260",
- "id": "10260",
- "description": "Apache Commons components",
- "name": "Commons"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310479",
- "id": "12310479",
- "key": "JEXL",
- "name": "Commons JEXL",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310479&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310479&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310479&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310479&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10260",
- "id": "10260",
- "description": "Apache Commons components",
- "name": "Commons"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310480",
- "id": "12310480",
- "key": "JXPATH",
- "name": "Commons JXPath",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310480&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310480&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310480&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310480&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10260",
- "id": "10260",
- "description": "Apache Commons components",
- "name": "Commons"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310481",
- "id": "12310481",
- "key": "LANG",
- "name": "Commons Lang",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310481&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310481&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310481&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310481&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10260",
- "id": "10260",
- "description": "Apache Commons components",
- "name": "Commons"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310483",
- "id": "12310483",
- "key": "LAUNCHER",
- "name": "Commons Launcher",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310483&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310483&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310483&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310483&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10260",
- "id": "10260",
- "description": "Apache Commons components",
- "name": "Commons"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310484",
- "id": "12310484",
- "key": "LOGGING",
- "name": "Commons Logging",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310484&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310484&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310484&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310484&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10260",
- "id": "10260",
- "description": "Apache Commons components",
- "name": "Commons"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310485",
- "id": "12310485",
- "key": "MATH",
- "name": "Commons Math",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310485&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310485&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310485&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310485&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10260",
- "id": "10260",
- "description": "Apache Commons components",
- "name": "Commons"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310486",
- "id": "12310486",
- "key": "MODELER",
- "name": "Commons Modeler",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310486&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310486&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310486&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310486&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10260",
- "id": "10260",
- "description": "Apache Commons components",
- "name": "Commons"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310487",
- "id": "12310487",
- "key": "NET",
- "name": "Commons Net",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310487&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310487&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310487&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310487&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10260",
- "id": "10260",
- "description": "Apache Commons components",
- "name": "Commons"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12313525",
- "id": "12313525",
- "key": "OGNL",
- "name": "Commons OGNL",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12313525&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12313525&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12313525&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12313525&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10260",
- "id": "10260",
- "description": "Apache Commons components",
- "name": "Commons"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310488",
- "id": "12310488",
- "key": "POOL",
- "name": "Commons Pool",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310488&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310488&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310488&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310488&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10260",
- "id": "10260",
- "description": "Apache Commons components",
- "name": "Commons"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310489",
- "id": "12310489",
- "key": "PRIMITIVES",
- "name": "Commons Primitives",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310489&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310489&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310489&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310489&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10260",
- "id": "10260",
- "description": "Apache Commons components",
- "name": "Commons"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310731",
- "id": "12310731",
- "key": "PROXY",
- "name": "Commons Proxy",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310731&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310731&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310731&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310731&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10260",
- "id": "10260",
- "description": "Apache Commons components",
- "name": "Commons"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310490",
- "id": "12310490",
- "key": "RESOURCES",
- "name": "Commons Resources",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310490&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310490&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310490&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310490&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10260",
- "id": "10260",
- "description": "Apache Commons components",
- "name": "Commons"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310491",
- "id": "12310491",
- "key": "SANDBOX",
- "name": "Commons Sandbox",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310491&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310491&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310491&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310491&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10260",
- "id": "10260",
- "description": "Apache Commons components",
- "name": "Commons"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310720",
- "id": "12310720",
- "key": "SANSELAN",
- "name": "Commons Sanselan",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310720&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310720&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310720&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310720&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10260",
- "id": "10260",
- "description": "Apache Commons components",
- "name": "Commons"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310492",
- "id": "12310492",
- "key": "SCXML",
- "name": "Commons SCXML",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310492&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310492&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310492&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310492&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10260",
- "id": "10260",
- "description": "Apache Commons components",
- "name": "Commons"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12318221",
- "id": "12318221",
- "key": "TEXT",
- "name": "Commons Text",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12318221&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12318221&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12318221&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12318221&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10260",
- "id": "10260",
- "description": "Apache Commons components",
- "name": "Commons"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310493",
- "id": "12310493",
- "key": "TRANSACTION",
- "name": "Commons Transaction",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310493&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310493&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310493&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310493&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10260",
- "id": "10260",
- "description": "Apache Commons components",
- "name": "Commons"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310494",
- "id": "12310494",
- "key": "VALIDATOR",
- "name": "Commons Validator",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310494&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310494&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310494&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310494&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10260",
- "id": "10260",
- "description": "Apache Commons components",
- "name": "Commons"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310495",
- "id": "12310495",
- "key": "VFS",
- "name": "Commons VFS",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310495&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310495&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310495&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310495&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10260",
- "id": "10260",
- "description": "Apache Commons components",
- "name": "Commons"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12315320",
- "id": "12315320",
- "key": "WEAVER",
- "name": "Commons Weaver",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12315320&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12315320&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12315320&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12315320&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10260",
- "id": "10260",
- "description": "Apache Commons components",
- "name": "Commons"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311010",
- "id": "12311010",
- "key": "COMDEV",
- "name": "Community Development",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311010&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311010&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311010&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311010&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10130",
- "id": "10130",
- "description": "General ASF-wide projects.",
- "name": "Apache Software Foundation"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12316921",
- "id": "12316921",
- "key": "CONTINUUM",
- "name": "Continuum",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12316921&avatarId=23443",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12316921&avatarId=23443",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12316921&avatarId=23443",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12316921&avatarId=23443"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/13565",
- "id": "13565",
- "description": "Apache Continuum Project",
- "name": "Continuum"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12316120",
- "id": "12316120",
- "key": "COR",
- "name": "Corinthia",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12316120&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12316120&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12316120&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12316120&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10220",
- "id": "10220",
- "description": "Retired projects - These projects have no active community and are read-only.",
- "name": "Retired"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12318722",
- "id": "12318722",
- "key": "COTTON",
- "name": "COTTON",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12318722&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12318722&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12318722&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12318722&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10220",
- "id": "10220",
- "description": "Retired projects - These projects have no active community and are read-only.",
- "name": "Retired"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310780",
- "id": "12310780",
- "key": "COUCHDB",
- "name": "CouchDB",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310780&avatarId=13936",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310780&avatarId=13936",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310780&avatarId=13936",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310780&avatarId=13936"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10320",
- "id": "10320",
- "description": "CouchDB & related projects",
- "name": "CouchDB"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12313526",
- "id": "12313526",
- "key": "CRUNCH",
- "name": "Crunch",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12313526&avatarId=23934",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12313526&avatarId=23934",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12313526&avatarId=23934",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12313526&avatarId=23934"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/11965",
- "id": "11965",
- "description": "Apache Crunch related projects",
- "name": "Crunch"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12313621",
- "id": "12313621",
- "key": "CTAKES",
- "name": "cTAKES",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12313621&avatarId=16772",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12313621&avatarId=16772",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12313621&avatarId=16772",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12313621&avatarId=16772"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10024",
- "id": "10024",
- "description": "Incubator related projects",
- "name": "Incubator"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310511",
- "id": "12310511",
- "key": "CXF",
- "name": "CXF",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310511&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310511&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310511&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310511&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10300",
- "id": "10300",
- "description": "CXF related projects",
- "name": "CXF"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311031",
- "id": "12311031",
- "key": "DOSGI",
- "name": "CXF Distributed OSGi",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311031&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311031&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311031&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311031&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10300",
- "id": "10300",
- "description": "CXF related projects",
- "name": "CXF"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12315520",
- "id": "12315520",
- "key": "CXFXJC",
- "name": "CXF XJC Utils",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12315520&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12315520&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12315520&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12315520&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10300",
- "id": "10300",
- "description": "CXF related projects",
- "name": "CXF"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12313420",
- "id": "12313420",
- "key": "FEDIZ",
- "name": "CXF-Fediz",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12313420&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12313420&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12313420&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12313420&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10300",
- "id": "10300",
- "description": "CXF related projects",
- "name": "CXF"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12315221",
- "id": "12315221",
- "key": "DATAFU",
- "name": "DataFu",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12315221&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12315221&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12315221&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12315221&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10024",
- "id": "10024",
- "description": "Incubator related projects",
- "name": "Incubator"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310320",
- "id": "12310320",
- "key": "DAYTRADER",
- "name": "DayTrader",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310320&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310320&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310320&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310320&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10061",
- "id": "10061",
- "description": "Apache J2EE project",
- "name": "Geronimo"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10731",
- "id": "10731",
- "key": "DDLUTILS",
- "name": "DdlUtils",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10731&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10731&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10731&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10731&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10090",
- "id": "10090",
- "description": "DB related projects",
- "name": "DB"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311150",
- "id": "12311150",
- "key": "DTACLOUD",
- "name": "DeltaCloud",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311150&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311150&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311150&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311150&avatarId=10011"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12312820",
- "id": "12312820",
- "key": "DELTASPIKE",
- "name": "DeltaSpike",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12312820&avatarId=21232",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12312820&avatarId=21232",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12312820&avatarId=21232",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12312820&avatarId=21232"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/12660",
- "id": "12660",
- "description": "Apache Deltaspike related projects",
- "name": "Deltaspike"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10403",
- "id": "10403",
- "key": "DEPOT",
- "name": "Depot",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10403&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10403&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10403&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10403&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10220",
- "id": "10220",
- "description": "Retired projects - These projects have no active community and are read-only.",
- "name": "Retired"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10594",
- "id": "10594",
- "key": "DERBY",
- "name": "Derby",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10594&avatarId=10122",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10594&avatarId=10122",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10594&avatarId=10122",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10594&avatarId=10122"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10090",
- "id": "10090",
- "description": "DB related projects",
- "name": "DB"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12312921",
- "id": "12312921",
- "key": "DMAP",
- "name": "DeviceMap",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12312921&avatarId=20134",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12312921&avatarId=20134",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12312921&avatarId=20134",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12312921&avatarId=20134"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/12860",
- "id": "12860",
- "description": "Apache Devicemap related.",
- "name": "Devicemap"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10400",
- "id": "10400",
- "key": "DIR",
- "name": "Directory",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10400&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10400&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10400&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10400&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10041",
- "id": "10041",
- "description": "Apache Directory Project Category",
- "name": "Directory"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310260",
- "id": "12310260",
- "key": "DIRSERVER",
- "name": "Directory ApacheDS",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310260&avatarId=13247",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310260&avatarId=13247",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310260&avatarId=13247",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310260&avatarId=13247"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10041",
- "id": "10041",
- "description": "Apache Directory Project Category",
- "name": "Directory"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310962",
- "id": "12310962",
- "key": "DIRAPI",
- "name": "Directory Client API",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310962&avatarId=23691",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310962&avatarId=23691",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310962&avatarId=23691",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310962&avatarId=23691"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10041",
- "id": "10041",
- "description": "Apache Directory Project Category",
- "name": "Directory"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310880",
- "id": "12310880",
- "key": "DIRGROOVY",
- "name": "Directory Groovy LDAP",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310880&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310880&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310880&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310880&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10041",
- "id": "10041",
- "description": "Apache Directory Project Category",
- "name": "Directory"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310910",
- "id": "12310910",
- "key": "DIRKRB",
- "name": "Directory Kerberos",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310910&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310910&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310910&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310910&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10041",
- "id": "10041",
- "description": "Apache Directory Project Category",
- "name": "Directory"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10512",
- "id": "10512",
- "key": "DIRNAMING",
- "name": "Directory Naming",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10512&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10512&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10512&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10512&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10041",
- "id": "10041",
- "description": "Apache Directory Project Category",
- "name": "Directory"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310770",
- "id": "12310770",
- "key": "DIRSHARED",
- "name": "Directory Shared (Please use DIRAPI instead)",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310770&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310770&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310770&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310770&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10041",
- "id": "10041",
- "description": "Apache Directory Project Category",
- "name": "Directory"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310590",
- "id": "12310590",
- "key": "DIRSTUDIO",
- "name": "Directory Studio",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310590&avatarId=13246",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310590&avatarId=13246",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310590&avatarId=13246",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310590&avatarId=13246"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10041",
- "id": "10041",
- "description": "Apache Directory Project Category",
- "name": "Directory"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310652",
- "id": "12310652",
- "key": "DBF",
- "name": "DocBook Framework",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310652&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310652&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310652&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310652&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10200",
- "id": "10200",
- "description": "Apache Velocity related projects",
- "name": "Velocity"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310842",
- "id": "12310842",
- "key": "DROIDS",
- "name": "Droids",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310842&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310842&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310842&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310842&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10220",
- "id": "10220",
- "description": "Retired projects - These projects have no active community and are read-only.",
- "name": "Retired"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310670",
- "id": "12310670",
- "key": "DVSL",
- "name": "Dvsl",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310670&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310670&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310670&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310670&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10200",
- "id": "10200",
- "description": "Apache Velocity related projects",
- "name": "Velocity"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12319222",
- "id": "12319222",
- "key": "EAGLE",
- "name": "Eagle",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12319222&avatarId=25467",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12319222&avatarId=25467",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12319222&avatarId=25467",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12319222&avatarId=25467"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10024",
- "id": "10024",
- "description": "Incubator related projects",
- "name": "Incubator"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311270",
- "id": "12311270",
- "key": "EASYANT",
- "name": "EasyAnt",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311270&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311270&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311270&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311270&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10310",
- "id": "10310",
- "description": "Apache Ant related projects",
- "name": "Ant"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310813",
- "id": "12310813",
- "key": "ECS",
- "name": "ECS",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310813&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310813&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310813&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310813&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10220",
- "id": "10220",
- "description": "Retired projects - These projects have no active community and are read-only.",
- "name": "Retired"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310820",
- "id": "12310820",
- "key": "EMPIREDB",
- "name": "Empire-DB",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310820&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310820&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310820&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310820&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10024",
- "id": "10024",
- "description": "Incubator related projects",
- "name": "Incubator"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310850",
- "id": "12310850",
- "key": "ESME",
- "name": "Enterprise Social Messaging Environment (ESME)",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310850&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310850&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310850&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310850&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10490",
- "id": "10490",
- "description": "Apache ESME related projects",
- "name": "ESME"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12314926",
- "id": "12314926",
- "key": "ESCIMO",
- "name": "eSCIMo ",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12314926&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12314926&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12314926&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12314926&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10041",
- "id": "10041",
- "description": "Apache Directory Project Category",
- "name": "Directory"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310835",
- "id": "12310835",
- "key": "ETCH",
- "name": "Etch",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310835&avatarId=16525",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310835&avatarId=16525",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310835&avatarId=16525",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310835&avatarId=16525"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/11861",
- "id": "11861",
- "description": "Apache Etch related projects",
- "name": "Etch"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10620",
- "id": "10620",
- "key": "EWS",
- "name": "ews",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10620&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10620&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10620&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10620&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10220",
- "id": "10220",
- "description": "Retired projects - These projects have no active community and are read-only.",
- "name": "Retired"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10428",
- "id": "10428",
- "key": "EXLBR",
- "name": "Excalibur Components",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10428&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10428&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10428&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10428&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10220",
- "id": "10220",
- "description": "Retired projects - These projects have no active community and are read-only.",
- "name": "Retired"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10420",
- "id": "10420",
- "key": "FORTRESS",
- "name": "Excalibur Fortress",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10420&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10420&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10420&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10420&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10220",
- "id": "10220",
- "description": "Retired projects - These projects have no active community and are read-only.",
- "name": "Retired"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12314429",
- "id": "12314429",
- "key": "FALCON",
- "name": "Falcon",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12314429&avatarId=16974",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12314429&avatarId=16974",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12314429&avatarId=16974",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12314429&avatarId=16974"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10024",
- "id": "10024",
- "description": "Incubator related projects",
- "name": "Incubator"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310100",
- "id": "12310100",
- "key": "FELIX",
- "name": "Felix",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310100&avatarId=15642",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310100&avatarId=15642",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310100&avatarId=15642",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310100&avatarId=15642"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10121",
- "id": "10121",
- "description": "OSGi container projects",
- "name": "Felix"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12315522",
- "id": "12315522",
- "key": "FLINK",
- "name": "Flink",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12315522&avatarId=21685",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12315522&avatarId=21685",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12315522&avatarId=21685",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12315522&avatarId=21685"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/13360",
- "id": "13360",
- "description": "Apache Flink",
- "name": "Flink"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311321",
- "id": "12311321",
- "key": "FLUME",
- "name": "Flume",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311321&avatarId=11433",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311321&avatarId=11433",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311321&avatarId=11433",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311321&avatarId=11433"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/11960",
- "id": "11960",
- "description": "Apache Flume related projects",
- "name": "Flume"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12314223",
- "id": "12314223",
- "key": "FOP",
- "name": "FOP",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12314223&avatarId=21763",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12314223&avatarId=21763",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12314223&avatarId=21763",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12314223&avatarId=21763"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310000",
- "id": "12310000",
- "key": "FOR",
- "name": "Forrest",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310000&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310000&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310000&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310000&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10112",
- "id": "10112",
- "description": "Forrest related projects",
- "name": "Forrest"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12315921",
- "id": "12315921",
- "key": "FC",
- "name": "FORTRESS",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12315921&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12315921&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12315921&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12315921&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10041",
- "id": "10041",
- "description": "Apache Directory Project Category",
- "name": "Directory"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10571",
- "id": "10571",
- "key": "FTPSERVER",
- "name": "FtpServer",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10571&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10571&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10571&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10571&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10240",
- "id": "10240",
- "description": "MINA related projects",
- "name": "MINA"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310200",
- "id": "12310200",
- "key": "GBUILD",
- "name": "GBuild",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310200&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310200&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310200&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310200&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10061",
- "id": "10061",
- "description": "Apache J2EE project",
- "name": "Geronimo"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12318420",
- "id": "12318420",
- "key": "GEODE",
- "name": "Geode",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12318420&avatarId=23734",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12318420&avatarId=23734",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12318420&avatarId=23734",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12318420&avatarId=23734"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10024",
- "id": "10024",
- "description": "Incubator related projects",
- "name": "Incubator"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10220",
- "id": "10220",
- "key": "GERONIMO",
- "name": "Geronimo",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10220&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10220&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10220&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10220&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10061",
- "id": "10061",
- "description": "Apache J2EE project",
- "name": "Geronimo"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310111",
- "id": "12310111",
- "key": "GERONIMODEVTOOLS",
- "name": "Geronimo-Devtools",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310111&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310111&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310111&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310111&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10061",
- "id": "10061",
- "description": "Apache J2EE project",
- "name": "Geronimo"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311820",
- "id": "12311820",
- "key": "GIRAPH",
- "name": "Giraph",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311820&avatarId=14838",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311820&avatarId=14838",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311820&avatarId=14838",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311820&avatarId=14838"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/11966",
- "id": "11966",
- "description": "Apache Giraph related projects",
- "name": "Giraph"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12320121",
- "id": "12320121",
- "key": "GOSSIP",
- "name": "Gossip",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12320121&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12320121&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12320121&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12320121&avatarId=10011"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10661",
- "id": "10661",
- "key": "GRFT",
- "name": "Graffito",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10661&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10661&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10661&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10661&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10220",
- "id": "10220",
- "description": "Retired projects - These projects have no active community and are read-only.",
- "name": "Retired"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12318123",
- "id": "12318123",
- "key": "GROOVY",
- "name": "Groovy",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12318123&avatarId=24643",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12318123&avatarId=24643",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12318123&avatarId=24643",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12318123&avatarId=24643"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/13860",
- "id": "13860",
- "description": "Apache Groovy project",
- "name": "Groovy"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310503",
- "id": "12310503",
- "key": "GSHELL",
- "name": "GShell",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310503&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310503&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310503&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310503&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10061",
- "id": "10061",
- "description": "Apache J2EE project",
- "name": "Geronimo"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12319620",
- "id": "12319620",
- "key": "GUACAMOLE",
- "name": "Guacamole",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12319620&avatarId=26958",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12319620&avatarId=26958",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12319620&avatarId=26958",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12319620&avatarId=26958"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10024",
- "id": "10024",
- "description": "Incubator related projects",
- "name": "Incubator"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10457",
- "id": "10457",
- "key": "GUMP",
- "name": "Gump",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10457&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10457&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10457&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10457&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10030",
- "id": "10030",
- "description": "Gump related projects",
- "name": "Gump"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310240",
- "id": "12310240",
- "key": "HADOOP",
- "name": "Hadoop Common",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310240&avatarId=10095",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310240&avatarId=10095",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310240&avatarId=10095",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310240&avatarId=10095"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10292",
- "id": "10292",
- "description": "Scalable Distributed Computing",
- "name": "Hadoop"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12314121",
- "id": "12314121",
- "key": "HDT",
- "name": "Hadoop Development Tools",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12314121&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12314121&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12314121&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12314121&avatarId=10011"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310942",
- "id": "12310942",
- "key": "HDFS",
- "name": "Hadoop HDFS",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310942&avatarId=10094",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310942&avatarId=10094",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310942&avatarId=10094",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310942&avatarId=10094"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10292",
- "id": "10292",
- "description": "Scalable Distributed Computing",
- "name": "Hadoop"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310941",
- "id": "12310941",
- "key": "MAPREDUCE",
- "name": "Hadoop Map/Reduce",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310941&avatarId=10096",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310941&avatarId=10096",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310941&avatarId=10096",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310941&avatarId=10096"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10292",
- "id": "10292",
- "description": "Scalable Distributed Computing",
- "name": "Hadoop"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12313722",
- "id": "12313722",
- "key": "YARN",
- "name": "Hadoop YARN",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12313722&avatarId=15135",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12313722&avatarId=15135",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12313722&avatarId=15135",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12313722&avatarId=15135"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10292",
- "id": "10292",
- "description": "Scalable Distributed Computing",
- "name": "Hadoop"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310810",
- "id": "12310810",
- "key": "HAMA",
- "name": "Hama",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310810&avatarId=10328",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310810&avatarId=10328",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310810&avatarId=10328",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310810&avatarId=10328"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/11561",
- "id": "11561",
- "description": "Apache Hama related projects",
- "name": "Hama"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310050",
- "id": "12310050",
- "key": "HARMONY",
- "name": "Harmony",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310050&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310050&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310050&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310050&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10220",
- "id": "10220",
- "description": "Retired projects - These projects have no active community and are read-only.",
- "name": "Retired"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310753",
- "id": "12310753",
- "key": "HBASE",
- "name": "HBase",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310753&avatarId=16550",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310753&avatarId=16550",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310753&avatarId=16550",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310753&avatarId=16550"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10292",
- "id": "10292",
- "description": "Scalable Distributed Computing",
- "name": "Hadoop"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311291",
- "id": "12311291",
- "key": "HCATALOG",
- "name": "HCatalog",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311291&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311291&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311291&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311291&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10024",
- "id": "10024",
- "description": "Incubator related projects",
- "name": "Incubator"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310550",
- "id": "12310550",
- "key": "HERALDRY",
- "name": "Heraldry",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310550&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310550&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310550&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310550&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10220",
- "id": "10220",
- "description": "Retired projects - These projects have no active community and are read-only.",
- "name": "Retired"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311000",
- "id": "12311000",
- "key": "HISE",
- "name": "HISE",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311000&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311000&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311000&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311000&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10220",
- "id": "10220",
- "description": "Retired projects - These projects have no active community and are read-only.",
- "name": "Retired"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310843",
- "id": "12310843",
- "key": "HIVE",
- "name": "Hive",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310843&avatarId=11935",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310843&avatarId=11935",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310843&avatarId=11935",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310843&avatarId=11935"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10292",
- "id": "10292",
- "description": "Scalable Distributed Computing",
- "name": "Hadoop"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10500",
- "id": "10500",
- "key": "HIVEMIND",
- "name": "HiveMind",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10500&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10500&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10500&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10500&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10220",
- "id": "10220",
- "description": "Retired projects - These projects have no active community and are read-only.",
- "name": "Retired"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12315924",
- "id": "12315924",
- "key": "HTRACE",
- "name": "HTrace",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12315924&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12315924&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12315924&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12315924&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10024",
- "id": "10024",
- "description": "Incubator related projects",
- "name": "Incubator"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311081",
- "id": "12311081",
- "key": "HTTPASYNC",
- "name": "HttpComponents HttpAsyncClient",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311081&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311081&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311081&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311081&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10280",
- "id": "10280",
- "description": "Apache HttpComponents Project",
- "name": "HttpComponents"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310360",
- "id": "12310360",
- "key": "HTTPCLIENT",
- "name": "HttpComponents HttpClient",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310360&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310360&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310360&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310360&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10280",
- "id": "10280",
- "description": "Apache HttpComponents Project",
- "name": "HttpComponents"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310340",
- "id": "12310340",
- "key": "HTTPCORE",
- "name": "HttpComponents HttpCore",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310340&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310340&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310340&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310340&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10280",
- "id": "10280",
- "description": "Apache HttpComponents Project",
- "name": "HttpComponents"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10602",
- "id": "10602",
- "key": "IBATISNET",
- "name": "iBatis for .NET",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10602&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10602&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10602&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10602&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10220",
- "id": "10220",
- "description": "Retired projects - These projects have no active community and are read-only.",
- "name": "Retired"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10601",
- "id": "10601",
- "key": "IBATIS",
- "name": "iBatis for Java [READ ONLY]",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10601&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10601&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10601&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10601&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10220",
- "id": "10220",
- "description": "Retired projects - These projects have no active community and are read-only.",
- "name": "Retired"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310572",
- "id": "12310572",
- "key": "RBATIS",
- "name": "iBATIS for Ruby",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310572&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310572&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310572&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310572&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10220",
- "id": "10220",
- "description": "Retired projects - These projects have no active community and are read-only.",
- "name": "Retired"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12315922",
- "id": "12315922",
- "key": "IGNITE",
- "name": "Ignite",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12315922&avatarId=22313",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12315922&avatarId=22313",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12315922&avatarId=22313",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12315922&avatarId=22313"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/13563",
- "id": "13563",
- "description": "Apache Ignite",
- "name": "Ignite"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310742",
- "id": "12310742",
- "key": "IMPERIUS",
- "name": "Imperius",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310742&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310742&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310742&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310742&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10220",
- "id": "10220",
- "description": "Retired projects - These projects have no active community and are read-only.",
- "name": "Retired"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10595",
- "id": "10595",
- "key": "INCUBATOR",
- "name": "Incubator",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10595&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10595&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10595&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10595&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10024",
- "id": "10024",
- "description": "Incubator related projects",
- "name": "Incubator"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10410",
- "id": "10410",
- "key": "INFRA",
- "name": "Infrastructure",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10410&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10410&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10410&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10410&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10130",
- "id": "10130",
- "description": "General ASF-wide projects.",
- "name": "Apache Software Foundation"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12319622",
- "id": "12319622",
- "key": "INFRAP",
- "name": "Infrastructure Projects",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12319622&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12319622&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12319622&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12319622&avatarId=10011"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12319528",
- "id": "12319528",
- "key": "IOTA",
- "name": "Iota",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12319528&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12319528&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12319528&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12319528&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10024",
- "id": "10024",
- "description": "Incubator related projects",
- "name": "Incubator"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311171",
- "id": "12311171",
- "key": "ISIS",
- "name": "Isis",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311171&avatarId=16437",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311171&avatarId=16437",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311171&avatarId=16437",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311171&avatarId=16437"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/11860",
- "id": "11860",
- "description": "",
- "name": "Isis"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310580",
- "id": "12310580",
- "key": "IVY",
- "name": "Ivy",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310580&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310580&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310580&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310580&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10310",
- "id": "10310",
- "description": "Apache Ant related projects",
- "name": "Ant"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310640",
- "id": "12310640",
- "key": "IVYDE",
- "name": "IvyDE",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310640&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310640&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310640&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310640&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10310",
- "id": "10310",
- "description": "Apache Ant related projects",
- "name": "Ant"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10591",
- "id": "10591",
- "key": "JCR",
- "name": "Jackrabbit Content Repository",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10591&avatarId=10052",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10591&avatarId=10052",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10591&avatarId=10052",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10591&avatarId=10052"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10170",
- "id": "10170",
- "description": "Jackrabbit related projects",
- "name": "Jackrabbit"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12314920",
- "id": "12314920",
- "key": "JCRVLT",
- "name": "Jackrabbit FileVault",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12314920&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12314920&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12314920&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12314920&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10170",
- "id": "10170",
- "description": "Jackrabbit related projects",
- "name": "Jackrabbit"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310935",
- "id": "12310935",
- "key": "JCRBENCH",
- "name": "Jackrabbit JCR Benchmark",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310935&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310935&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310935&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310935&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10170",
- "id": "10170",
- "description": "Jackrabbit related projects",
- "name": "Jackrabbit"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310934",
- "id": "12310934",
- "key": "JCRCL",
- "name": "Jackrabbit JCR Classloader",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310934&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310934&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310934&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310934&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10170",
- "id": "10170",
- "description": "Jackrabbit related projects",
- "name": "Jackrabbit"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310933",
- "id": "12310933",
- "key": "JCRSERVLET",
- "name": "Jackrabbit JCR Servlets",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310933&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310933&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310933&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310933&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10170",
- "id": "10170",
- "description": "Jackrabbit related projects",
- "name": "Jackrabbit"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310970",
- "id": "12310970",
- "key": "JCRTCK",
- "name": "Jackrabbit JCR Tests",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310970&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310970&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310970&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310970&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10170",
- "id": "10170",
- "description": "Jackrabbit related projects",
- "name": "Jackrabbit"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310862",
- "id": "12310862",
- "key": "JCRRMI",
- "name": "Jackrabbit JCR-RMI",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310862&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310862&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310862&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310862&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10170",
- "id": "10170",
- "description": "Jackrabbit related projects",
- "name": "Jackrabbit"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12313221",
- "id": "12313221",
- "key": "OAK",
- "name": "Jackrabbit Oak",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12313221&avatarId=21936",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12313221&avatarId=21936",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12313221&avatarId=21936",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12313221&avatarId=21936"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10170",
- "id": "10170",
- "description": "Jackrabbit related projects",
- "name": "Jackrabbit"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310932",
- "id": "12310932",
- "key": "OCM",
- "name": "Jackrabbit OCM",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310932&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310932&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310932&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310932&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10170",
- "id": "10170",
- "description": "Jackrabbit related projects",
- "name": "Jackrabbit"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310861",
- "id": "12310861",
- "key": "JCRSITE",
- "name": "Jackrabbit Site",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310861&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310861&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310861&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310861&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10170",
- "id": "10170",
- "description": "Jackrabbit related projects",
- "name": "Jackrabbit"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310980",
- "id": "12310980",
- "key": "HUPA",
- "name": "James Hupa",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310980&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310980&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310980&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310980&avatarId=10011"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310831",
- "id": "12310831",
- "key": "IMAP",
- "name": "James Imap",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310831&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310831&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310831&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310831&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10020",
- "id": "10020",
- "description": "James related projects",
- "name": "James"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310990",
- "id": "12310990",
- "key": "JDKIM",
- "name": "James jDKIM",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310990&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310990&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310990&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310990&avatarId=10011"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10412",
- "id": "10412",
- "key": "JSIEVE",
- "name": "James jSieve",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10412&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10412&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10412&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10412&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10020",
- "id": "10020",
- "description": "James related projects",
- "name": "James"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310350",
- "id": "12310350",
- "key": "JSPF",
- "name": "James jSPF",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310350&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310350&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310350&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310350&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10020",
- "id": "10020",
- "description": "James related projects",
- "name": "James"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311221",
- "id": "12311221",
- "key": "MAILBOX",
- "name": "James Mailbox",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311221&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311221&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311221&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311221&avatarId=10011"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310660",
- "id": "12310660",
- "key": "MAILET",
- "name": "James Mailet",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310660&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310660&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310660&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310660&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10020",
- "id": "10020",
- "description": "James related projects",
- "name": "James"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310521",
- "id": "12310521",
- "key": "MIME4J",
- "name": "James Mime4j",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310521&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310521&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310521&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310521&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10020",
- "id": "10020",
- "description": "James related projects",
- "name": "James"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310847",
- "id": "12310847",
- "key": "MPT",
- "name": "James MPT",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310847&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310847&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310847&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310847&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10020",
- "id": "10020",
- "description": "James related projects",
- "name": "James"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310507",
- "id": "12310507",
- "key": "POSTAGE",
- "name": "James Postage",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310507&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310507&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310507&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310507&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10020",
- "id": "10020",
- "description": "James related projects",
- "name": "James"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311060",
- "id": "12311060",
- "key": "PROTOCOLS",
- "name": "James Protocols",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311060&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311060&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311060&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311060&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10020",
- "id": "10020",
- "description": "James related projects",
- "name": "James"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10411",
- "id": "10411",
- "key": "JAMES",
- "name": "James Server",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10411&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10411&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10411&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10411&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10020",
- "id": "10020",
- "description": "James related projects",
- "name": "James"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10463",
- "id": "10463",
- "key": "JAXME",
- "name": "JaxMe",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10463&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10463&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10463&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10463&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10220",
- "id": "10220",
- "description": "Retired projects - These projects have no active community and are read-only.",
- "name": "Retired"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12314430",
- "id": "12314430",
- "key": "JCLOUDS",
- "name": "jclouds",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12314430&avatarId=19894",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12314430&avatarId=19894",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12314430&avatarId=19894",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12314430&avatarId=19894"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/12160",
- "id": "12160",
- "description": "Apache jclouds projects",
- "name": "jclouds"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10630",
- "id": "10630",
- "key": "JDO",
- "name": "JDO",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10630&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10630&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10630&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10630&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10090",
- "id": "10090",
- "description": "DB related projects",
- "name": "DB"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10493",
- "id": "10493",
- "key": "JS1",
- "name": "Jetspeed",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10493&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10493&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10493&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10493&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10050",
- "id": "10050",
- "description": "Apache Portals",
- "name": "Portals"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10492",
- "id": "10492",
- "key": "JS2",
- "name": "Jetspeed 2",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10492&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10492&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10492&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10492&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10050",
- "id": "10050",
- "description": "Apache Portals",
- "name": "Portals"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12315523",
- "id": "12315523",
- "key": "JOHNZON",
- "name": "Johnzon",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12315523&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12315523&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12315523&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12315523&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10024",
- "id": "10024",
- "description": "Incubator related projects",
- "name": "Incubator"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12319720",
- "id": "12319720",
- "key": "JOSHUA",
- "name": "Joshua",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12319720&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12319720&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12319720&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12319720&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10024",
- "id": "10024",
- "description": "Incubator related projects",
- "name": "Incubator"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310812",
- "id": "12310812",
- "key": "JSEC",
- "name": "JSecurity",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310812&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310812&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310812&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310812&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10220",
- "id": "10220",
- "description": "Retired projects - These projects have no active community and are read-only.",
- "name": "Retired"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310732",
- "id": "12310732",
- "key": "JSPWIKI",
- "name": "JSPWiki",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310732&avatarId=11633",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310732&avatarId=11633",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310732&avatarId=11633",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310732&avatarId=11633"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/12061",
- "id": "12061",
- "description": "Apache JSPWiki related projects",
- "name": "JSPWiki"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10401",
- "id": "10401",
- "key": "JUDDI",
- "name": "jUDDI",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10401&avatarId=21858",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10401&avatarId=21858",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10401&avatarId=21858",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10401&avatarId=21858"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10471",
- "id": "10471",
- "description": "jUDDI related projects",
- "name": "jUDDI"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311720",
- "id": "12311720",
- "key": "KAFKA",
- "name": "Kafka",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311720&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311720&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311720&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311720&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10560",
- "id": "10560",
- "description": "Apache Kafka projects",
- "name": "Kafka"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12312324",
- "id": "12312324",
- "key": "KALUMET",
- "name": "Kalumet (Retired)",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12312324&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12312324&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12312324&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12312324&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10220",
- "id": "10220",
- "description": "Retired projects - These projects have no active community and are read-only.",
- "name": "Retired"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310061",
- "id": "12310061",
- "key": "KAND",
- "name": "Kandula",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310061&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310061&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310061&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310061&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10401",
- "id": "10401",
- "description": "Axis and Axis2 related projects",
- "name": "Axis"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311140",
- "id": "12311140",
- "key": "KARAF",
- "name": "Karaf",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311140&avatarId=10100",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311140&avatarId=10100",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311140&avatarId=10100",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311140&avatarId=10100"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10500",
- "id": "10500",
- "description": "Apache Karaf related",
- "name": "Karaf"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310848",
- "id": "12310848",
- "key": "KATO",
- "name": "Kato",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310848&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310848&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310848&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310848&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10220",
- "id": "10220",
- "description": "Retired projects - These projects have no active community and are read-only.",
- "name": "Retired"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310890",
- "id": "12310890",
- "key": "KI",
- "name": "Ki",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310890&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310890&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310890&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310890&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10220",
- "id": "10220",
- "description": "Retired projects - These projects have no active community and are read-only.",
- "name": "Retired"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311202",
- "id": "12311202",
- "key": "KITTY",
- "name": "Kitty",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311202&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311202&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311202&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311202&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10024",
- "id": "10024",
- "description": "Incubator related projects",
- "name": "Incubator"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12319523",
- "id": "12319523",
- "key": "KUDU",
- "name": "Kudu",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12319523&avatarId=26096",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12319523&avatarId=26096",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12319523&avatarId=26096",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12319523&avatarId=26096"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12316121",
- "id": "12316121",
- "key": "KYLIN",
- "name": "Kylin",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12316121&avatarId=22168",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12316121&avatarId=22168",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12316121&avatarId=22168",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12316121&avatarId=22168"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310571",
- "id": "12310571",
- "key": "LABS",
- "name": "Labs",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310571&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310571&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310571&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310571&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10210",
- "id": "10210",
- "description": "Labs related projects",
- "name": "Labs"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310740",
- "id": "12310740",
- "key": "HTTPDRAFT",
- "name": "Labs WebArch draft-fielding-http",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310740&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310740&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310740&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310740&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10210",
- "id": "10210",
- "description": "Labs related projects",
- "name": "Labs"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310811",
- "id": "12310811",
- "key": "LEGAL",
- "name": "Legal Discuss",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310811&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310811&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310811&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310811&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10130",
- "id": "10130",
- "description": "General ASF-wide projects.",
- "name": "Apache Software Foundation"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311030",
- "id": "12311030",
- "key": "LIBCLOUD",
- "name": "Libcloud",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311030&avatarId=10315",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311030&avatarId=10315",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311030&avatarId=10315",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311030&avatarId=10315"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10550",
- "id": "10550",
- "description": "Libcloud and related projects",
- "name": "Libcloud"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10550",
- "id": "10550",
- "key": "LOGCXX",
- "name": "Log4cxx",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10550&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10550&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10550&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10550&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10060",
- "id": "10060",
- "description": "Logging related projects",
- "name": "Logging"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310790",
- "id": "12310790",
- "key": "LOG4J2",
- "name": "Log4j 2",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310790&avatarId=23813",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310790&avatarId=23813",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310790&avatarId=23813",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310790&avatarId=23813"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10060",
- "id": "10060",
- "description": "Logging related projects",
- "name": "Logging"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10690",
- "id": "10690",
- "key": "LOG4NET",
- "name": "Log4net",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10690&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10690&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10690&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10690&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10060",
- "id": "10060",
- "description": "Logging related projects",
- "name": "Logging"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310690",
- "id": "12310690",
- "key": "LOG4PHP",
- "name": "Log4php",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310690&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310690&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310690&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310690&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10060",
- "id": "10060",
- "description": "Logging related projects",
- "name": "Logging"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310560",
- "id": "12310560",
- "key": "LOKAHI",
- "name": "Lokahi",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310560&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310560&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310560&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310560&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10220",
- "id": "10220",
- "description": "Retired projects - These projects have no active community and are read-only.",
- "name": "Retired"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310110",
- "id": "12310110",
- "key": "LUCENE",
- "name": "Lucene - Core",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310110&avatarId=10061",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310110&avatarId=10061",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310110&avatarId=10061",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310110&avatarId=10061"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10150",
- "id": "10150",
- "description": "Lucene-related projects",
- "name": "Lucene"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310290",
- "id": "12310290",
- "key": "LUCENENET",
- "name": "Lucene.Net",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310290&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310290&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310290&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310290&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10024",
- "id": "10024",
- "description": "Incubator related projects",
- "name": "Incubator"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10701",
- "id": "10701",
- "key": "LCN4C",
- "name": "Lucene4c",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10701&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10701&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10701&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10701&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10220",
- "id": "10220",
- "description": "Retired projects - These projects have no active community and are read-only.",
- "name": "Retired"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310501",
- "id": "12310501",
- "key": "LUCY",
- "name": "Lucy",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310501&avatarId=10647",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310501&avatarId=10647",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310501&avatarId=10647",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310501&avatarId=10647"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/14061",
- "id": "14061",
- "description": "Apache Lucy Related Projects",
- "name": "Lucy"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310751",
- "id": "12310751",
- "key": "MAHOUT",
- "name": "Mahout",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310751&avatarId=10103",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310751&avatarId=10103",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310751&avatarId=10103",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310751&avatarId=10103"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/13060",
- "id": "13060",
- "description": "Apache Mahout",
- "name": "Mahout"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311050",
- "id": "12311050",
- "key": "CONNECTORS",
- "name": "ManifoldCF",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311050&avatarId=10323",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311050&avatarId=10323",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311050&avatarId=10323",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311050&avatarId=10323"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/11260",
- "id": "11260",
- "description": "ManifoldCF related projects.",
- "name": "ManifoldCF"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12314321",
- "id": "12314321",
- "key": "MARMOTTA",
- "name": "Marmotta",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12314321&avatarId=16802",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12314321&avatarId=16802",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12314321&avatarId=16802",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12314321&avatarId=16802"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10024",
- "id": "10024",
- "description": "Incubator related projects",
- "name": "Incubator"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12316922",
- "id": "12316922",
- "key": "MNG",
- "name": "Maven",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12316922&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12316922&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12316922&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12316922&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10510",
- "id": "10510",
- "description": "Apache Maven related projects",
- "name": "Maven"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12317020",
- "id": "12317020",
- "key": "MACR",
- "name": "Maven ACR Plugin",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12317020&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12317020&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12317020&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12317020&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10510",
- "id": "10510",
- "description": "Apache Maven related projects",
- "name": "Maven"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12317021",
- "id": "12317021",
- "key": "MANT",
- "name": "Maven Ant Plugin",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12317021&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12317021&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12317021&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12317021&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10510",
- "id": "10510",
- "description": "Apache Maven related projects",
- "name": "Maven"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12317120",
- "id": "12317120",
- "key": "MANTTASKS",
- "name": "Maven Ant Tasks",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12317120&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12317120&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12317120&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12317120&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10510",
- "id": "10510",
- "description": "Apache Maven related projects",
- "name": "Maven"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12317121",
- "id": "12317121",
- "key": "MANTRUN",
- "name": "Maven Antrun Plugin",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12317121&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12317121&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12317121&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12317121&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10510",
- "id": "10510",
- "description": "Apache Maven related projects",
- "name": "Maven"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12317122",
- "id": "12317122",
- "key": "ARCHETYPE",
- "name": "Maven Archetype",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12317122&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12317122&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12317122&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12317122&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10510",
- "id": "10510",
- "description": "Apache Maven related projects",
- "name": "Maven"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12317123",
- "id": "12317123",
- "key": "MARCHETYPES",
- "name": "Maven Archetype Bundles",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12317123&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12317123&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12317123&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12317123&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10510",
- "id": "10510",
- "description": "Apache Maven related projects",
- "name": "Maven"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12317220",
- "id": "12317220",
- "key": "MASSEMBLY",
- "name": "Maven Assembly Plugin",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12317220&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12317220&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12317220&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12317220&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10510",
- "id": "10510",
- "description": "Apache Maven related projects",
- "name": "Maven"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12317221",
- "id": "12317221",
- "key": "MCHANGELOG",
- "name": "Maven Changelog Plugin",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12317221&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12317221&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12317221&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12317221&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10510",
- "id": "10510",
- "description": "Apache Maven related projects",
- "name": "Maven"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12317222",
- "id": "12317222",
- "key": "MCHANGES",
- "name": "Maven Changes Plugin",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12317222&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12317222&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12317222&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12317222&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10510",
- "id": "10510",
- "description": "Apache Maven related projects",
- "name": "Maven"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12317223",
- "id": "12317223",
- "key": "MCHECKSTYLE",
- "name": "Maven Checkstyle Plugin",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12317223&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12317223&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12317223&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12317223&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10510",
- "id": "10510",
- "description": "Apache Maven related projects",
- "name": "Maven"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12317224",
- "id": "12317224",
- "key": "MCLEAN",
- "name": "Maven Clean Plugin",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12317224&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12317224&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12317224&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12317224&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10510",
- "id": "10510",
- "description": "Apache Maven related projects",
- "name": "Maven"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12317225",
- "id": "12317225",
- "key": "MCOMPILER",
- "name": "Maven Compiler Plugin",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12317225&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12317225&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12317225&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12317225&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10510",
- "id": "10510",
- "description": "Apache Maven related projects",
- "name": "Maven"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12317227",
- "id": "12317227",
- "key": "MDEP",
- "name": "Maven Dependency Plugin",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12317227&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12317227&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12317227&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12317227&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10510",
- "id": "10510",
- "description": "Apache Maven related projects",
- "name": "Maven"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12317228",
- "id": "12317228",
- "key": "MDEPLOY",
- "name": "Maven Deploy Plugin",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12317228&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12317228&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12317228&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12317228&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10510",
- "id": "10510",
- "description": "Apache Maven related projects",
- "name": "Maven"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12317226",
- "id": "12317226",
- "key": "MDOAP",
- "name": "Maven DOAP Plugin",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12317226&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12317226&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12317226&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12317226&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10510",
- "id": "10510",
- "description": "Apache Maven related projects",
- "name": "Maven"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12317229",
- "id": "12317229",
- "key": "MDOCCK",
- "name": "Maven Documentation Checker Plugin",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12317229&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12317229&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12317229&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12317229&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10510",
- "id": "10510",
- "description": "Apache Maven related projects",
- "name": "Maven"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12317230",
- "id": "12317230",
- "key": "DOXIA",
- "name": "Maven Doxia",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12317230&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12317230&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12317230&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12317230&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10510",
- "id": "10510",
- "description": "Apache Maven related projects",
- "name": "Maven"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12317320",
- "id": "12317320",
- "key": "DOXIASITETOOLS",
- "name": "Maven Doxia Sitetools",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12317320&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12317320&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12317320&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12317320&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10510",
- "id": "10510",
- "description": "Apache Maven related projects",
- "name": "Maven"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12317420",
- "id": "12317420",
- "key": "DOXIATOOLS",
- "name": "Maven Doxia Tools",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12317420&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12317420&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12317420&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12317420&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10510",
- "id": "10510",
- "description": "Apache Maven related projects",
- "name": "Maven"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12317422",
- "id": "12317422",
- "key": "MEAR",
- "name": "Maven Ear Plugin",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12317422&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12317422&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12317422&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12317422&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10510",
- "id": "10510",
- "description": "Apache Maven related projects",
- "name": "Maven"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12317423",
- "id": "12317423",
- "key": "MECLIPSE",
- "name": "Maven Eclipse Plugin (RETIRED)",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12317423&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12317423&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12317423&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12317423&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10510",
- "id": "10510",
- "description": "Apache Maven related projects",
- "name": "Maven"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12317421",
- "id": "12317421",
- "key": "MEJB",
- "name": "Maven EJB Plugin",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12317421&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12317421&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12317421&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12317421&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10510",
- "id": "10510",
- "description": "Apache Maven related projects",
- "name": "Maven"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12317520",
- "id": "12317520",
- "key": "MENFORCER",
- "name": "Maven Enforcer Plugin",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12317520&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12317520&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12317520&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12317520&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10510",
- "id": "10510",
- "description": "Apache Maven related projects",
- "name": "Maven"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12317521",
- "id": "12317521",
- "key": "MGPG",
- "name": "Maven GPG Plugin",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12317521&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12317521&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12317521&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12317521&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10510",
- "id": "10510",
- "description": "Apache Maven related projects",
- "name": "Maven"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12317522",
- "id": "12317522",
- "key": "MPH",
- "name": "Maven Help Plugin",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12317522&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12317522&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12317522&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12317522&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10510",
- "id": "10510",
- "description": "Apache Maven related projects",
- "name": "Maven"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12317523",
- "id": "12317523",
- "key": "MINDEXER",
- "name": "Maven Indexer",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12317523&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12317523&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12317523&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12317523&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10510",
- "id": "10510",
- "description": "Apache Maven related projects",
- "name": "Maven"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12317524",
- "id": "12317524",
- "key": "MINSTALL",
- "name": "Maven Install Plugin",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12317524&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12317524&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12317524&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12317524&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10510",
- "id": "10510",
- "description": "Apache Maven related projects",
- "name": "Maven"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12317525",
- "id": "12317525",
- "key": "MINVOKER",
- "name": "Maven Invoker Plugin",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12317525&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12317525&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12317525&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12317525&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10510",
- "id": "10510",
- "description": "Apache Maven related projects",
- "name": "Maven"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12317526",
- "id": "12317526",
- "key": "MJAR",
- "name": "Maven JAR Plugin",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12317526&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12317526&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12317526&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12317526&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10510",
- "id": "10510",
- "description": "Apache Maven related projects",
- "name": "Maven"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12317528",
- "id": "12317528",
- "key": "MJARSIGNER",
- "name": "Maven Jar Signer Plugin",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12317528&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12317528&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12317528&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12317528&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10510",
- "id": "10510",
- "description": "Apache Maven related projects",
- "name": "Maven"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12317529",
- "id": "12317529",
- "key": "MJAVADOC",
- "name": "Maven Javadoc Plugin",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12317529&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12317529&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12317529&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12317529&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10510",
- "id": "10510",
- "description": "Apache Maven related projects",
- "name": "Maven"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12317527",
- "id": "12317527",
- "key": "JXR",
- "name": "Maven JXR",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12317527&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12317527&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12317527&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12317527&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10510",
- "id": "10510",
- "description": "Apache Maven related projects",
- "name": "Maven"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12317530",
- "id": "12317530",
- "key": "MLINKCHECK",
- "name": "Maven Linkcheck Plugin",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12317530&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12317530&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12317530&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12317530&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10510",
- "id": "10510",
- "description": "Apache Maven related projects",
- "name": "Maven"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12317622",
- "id": "12317622",
- "key": "MPATCH",
- "name": "Maven Patch Plugin",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12317622&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12317622&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12317622&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12317622&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10510",
- "id": "10510",
- "description": "Apache Maven related projects",
- "name": "Maven"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12317620",
- "id": "12317620",
- "key": "MPDF",
- "name": "Maven PDF Plugin",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12317620&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12317620&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12317620&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12317620&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10510",
- "id": "10510",
- "description": "Apache Maven related projects",
- "name": "Maven"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12317720",
- "id": "12317720",
- "key": "MPLUGINTESTING",
- "name": "Maven Plugin Testing",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12317720&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12317720&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12317720&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12317720&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10510",
- "id": "10510",
- "description": "Apache Maven related projects",
- "name": "Maven"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12317820",
- "id": "12317820",
- "key": "MPLUGIN",
- "name": "Maven Plugin Tools",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12317820&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12317820&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12317820&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12317820&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10510",
- "id": "10510",
- "description": "Apache Maven related projects",
- "name": "Maven"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12317621",
- "id": "12317621",
- "key": "MPMD",
- "name": "Maven PMD Plugin",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12317621&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12317621&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12317621&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12317621&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10510",
- "id": "10510",
- "description": "Apache Maven related projects",
- "name": "Maven"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311250",
- "id": "12311250",
- "key": "MPOM",
- "name": "Maven POMs",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311250&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311250&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311250&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311250&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10510",
- "id": "10510",
- "description": "Apache Maven related projects",
- "name": "Maven"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12317821",
- "id": "12317821",
- "key": "MPIR",
- "name": "Maven Project Info Reports Plugin",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12317821&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12317821&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12317821&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12317821&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10510",
- "id": "10510",
- "description": "Apache Maven related projects",
- "name": "Maven"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12317822",
- "id": "12317822",
- "key": "MNGSITE",
- "name": "Maven Project Web Site",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12317822&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12317822&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12317822&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12317822&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10510",
- "id": "10510",
- "description": "Apache Maven related projects",
- "name": "Maven"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12317823",
- "id": "12317823",
- "key": "MRAR",
- "name": "Maven Rar Plugin",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12317823&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12317823&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12317823&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12317823&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10510",
- "id": "10510",
- "description": "Apache Maven related projects",
- "name": "Maven"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12317824",
- "id": "12317824",
- "key": "MRELEASE",
- "name": "Maven Release Plugin",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12317824&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12317824&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12317824&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12317824&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10510",
- "id": "10510",
- "description": "Apache Maven related projects",
- "name": "Maven"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12317825",
- "id": "12317825",
- "key": "MRRESOURCES",
- "name": "Maven Remote Resources Plugin",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12317825&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12317825&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12317825&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12317825&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10510",
- "id": "10510",
- "description": "Apache Maven related projects",
- "name": "Maven"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12317826",
- "id": "12317826",
- "key": "MREPOSITORY",
- "name": "Maven Repository Plugin",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12317826&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12317826&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12317826&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12317826&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10510",
- "id": "10510",
- "description": "Apache Maven related projects",
- "name": "Maven"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12317827",
- "id": "12317827",
- "key": "MRESOURCES",
- "name": "Maven Resources Plugin",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12317827&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12317827&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12317827&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12317827&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10510",
- "id": "10510",
- "description": "Apache Maven related projects",
- "name": "Maven"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12317828",
- "id": "12317828",
- "key": "SCM",
- "name": "Maven SCM",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12317828&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12317828&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12317828&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12317828&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10510",
- "id": "10510",
- "description": "Apache Maven related projects",
- "name": "Maven"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12317920",
- "id": "12317920",
- "key": "MSCMPUB",
- "name": "Maven SCM Publish Plugin",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12317920&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12317920&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12317920&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12317920&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10510",
- "id": "10510",
- "description": "Apache Maven related projects",
- "name": "Maven"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12317921",
- "id": "12317921",
- "key": "MSHADE",
- "name": "Maven Shade Plugin",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12317921&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12317921&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12317921&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12317921&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10510",
- "id": "10510",
- "description": "Apache Maven related projects",
- "name": "Maven"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12317922",
- "id": "12317922",
- "key": "MSHARED",
- "name": "Maven Shared Components",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12317922&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12317922&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12317922&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12317922&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10510",
- "id": "10510",
- "description": "Apache Maven related projects",
- "name": "Maven"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12317923",
- "id": "12317923",
- "key": "MSITE",
- "name": "Maven Site Plugin",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12317923&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12317923&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12317923&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12317923&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10510",
- "id": "10510",
- "description": "Apache Maven related projects",
- "name": "Maven"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12317926",
- "id": "12317926",
- "key": "MSKINS",
- "name": "Maven Skins",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12317926&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12317926&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12317926&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12317926&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10510",
- "id": "10510",
- "description": "Apache Maven related projects",
- "name": "Maven"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12317924",
- "id": "12317924",
- "key": "MSOURCES",
- "name": "Maven Source Plugin",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12317924&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12317924&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12317924&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12317924&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10510",
- "id": "10510",
- "description": "Apache Maven related projects",
- "name": "Maven"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12317925",
- "id": "12317925",
- "key": "MSTAGE",
- "name": "Maven Stage Plugin",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12317925&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12317925&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12317925&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12317925&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10510",
- "id": "10510",
- "description": "Apache Maven related projects",
- "name": "Maven"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12317927",
- "id": "12317927",
- "key": "SUREFIRE",
- "name": "Maven Surefire",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12317927&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12317927&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12317927&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12317927&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10510",
- "id": "10510",
- "description": "Apache Maven related projects",
- "name": "Maven"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12318020",
- "id": "12318020",
- "key": "MTOOLCHAINS",
- "name": "Maven Toolchains Plugin",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12318020&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12318020&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12318020&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12318020&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10510",
- "id": "10510",
- "description": "Apache Maven related projects",
- "name": "Maven"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12318120",
- "id": "12318120",
- "key": "MVERIFIER",
- "name": "Maven Verifier Plugin",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12318120&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12318120&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12318120&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12318120&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10510",
- "id": "10510",
- "description": "Apache Maven related projects",
- "name": "Maven"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12318122",
- "id": "12318122",
- "key": "WAGON",
- "name": "Maven Wagon",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12318122&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12318122&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12318122&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12318122&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10510",
- "id": "10510",
- "description": "Apache Maven related projects",
- "name": "Maven"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12318121",
- "id": "12318121",
- "key": "MWAR",
- "name": "Maven WAR Plugin",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12318121&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12318121&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12318121&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12318121&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10510",
- "id": "10510",
- "description": "Apache Maven related projects",
- "name": "Maven"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12314620",
- "id": "12314620",
- "key": "MAVIBOT",
- "name": "Mavibot",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12314620&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12314620&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12314620&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12314620&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10041",
- "id": "10041",
- "description": "Apache Directory Project Category",
- "name": "Directory"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311242",
- "id": "12311242",
- "key": "MESOS",
- "name": "Mesos",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311242&avatarId=17056",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311242&avatarId=17056",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311242&avatarId=17056",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311242&avatarId=17056"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/12062",
- "id": "12062",
- "description": "Apache Mesos related",
- "name": "Mesos"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12319323",
- "id": "12319323",
- "key": "METRON",
- "name": "Metron",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12319323&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12319323&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12319323&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12319323&avatarId=10011"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12319526",
- "id": "12319526",
- "key": "MILAGRO",
- "name": "Milagro",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12319526&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12319526&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12319526&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12319526&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10024",
- "id": "10024",
- "description": "Incubator related projects",
- "name": "Incubator"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10670",
- "id": "10670",
- "key": "DIRMINA",
- "name": "MINA",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10670&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10670&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10670&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10670&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10240",
- "id": "10240",
- "description": "MINA related projects",
- "name": "MINA"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310849",
- "id": "12310849",
- "key": "SSHD",
- "name": "MINA SSHD",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310849&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310849&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310849&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310849&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10240",
- "id": "10240",
- "description": "MINA related projects",
- "name": "MINA"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10671",
- "id": "10671",
- "key": "MIRAE",
- "name": "Mirae",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10671&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10671&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10671&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10671&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10220",
- "id": "10220",
- "description": "Retired projects - These projects have no active community and are read-only.",
- "name": "Retired"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12319223",
- "id": "12319223",
- "key": "MJDEPS",
- "name": "MJDEPS",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12319223&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12319223&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12319223&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12319223&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10510",
- "id": "10510",
- "description": "Apache Maven related projects",
- "name": "Maven"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12319820",
- "id": "12319820",
- "key": "MNEMONIC",
- "name": "Mnemonic",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12319820&avatarId=26952",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12319820&avatarId=26952",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12319820&avatarId=26952",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12319820&avatarId=26952"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10024",
- "id": "10024",
- "description": "Incubator related projects",
- "name": "Incubator"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10640",
- "id": "10640",
- "key": "MODPYTHON",
- "name": "mod_python",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10640&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10640&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10640&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10640&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10080",
- "id": "10080",
- "description": "httpd and its related projects",
- "name": "HTTP Server"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12314428",
- "id": "12314428",
- "key": "MRQL",
- "name": "MRQL",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12314428&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12314428&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12314428&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12314428&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10024",
- "id": "10024",
- "description": "Incubator related projects",
- "name": "Incubator"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311292",
- "id": "12311292",
- "key": "MRUNIT",
- "name": "MRUnit",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311292&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311292&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311292&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311292&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/11968",
- "id": "11968",
- "description": "Apache MRUnit related projects",
- "name": "MRUnit"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10614",
- "id": "10614",
- "key": "MUSE",
- "name": "Muse",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10614&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10614&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10614&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10614&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10220",
- "id": "10220",
- "description": "Retired projects - These projects have no active community and are read-only.",
- "name": "Retired"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310300",
- "id": "12310300",
- "key": "ADFFACES",
- "name": "MyFaces ADF-Faces",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310300&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310300&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310300&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310300&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10100",
- "id": "10100",
- "description": "Apache MyFaces related projects",
- "name": "MyFaces"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311071",
- "id": "12311071",
- "key": "EXTCDI",
- "name": "MyFaces CODI",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311071&avatarId=10081",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311071&avatarId=10081",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311071&avatarId=10081",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311071&avatarId=10081"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10100",
- "id": "10100",
- "description": "Apache MyFaces related projects",
- "name": "MyFaces"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310846",
- "id": "12310846",
- "key": "MFCOMMONS",
- "name": "MyFaces Commons",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310846&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310846&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310846&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310846&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10100",
- "id": "10100",
- "description": "Apache MyFaces related projects",
- "name": "MyFaces"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10600",
- "id": "10600",
- "key": "MYFACES",
- "name": "MyFaces Core",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10600&avatarId=10079",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10600&avatarId=10079",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10600&avatarId=10079",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10600&avatarId=10079"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10100",
- "id": "10100",
- "description": "Apache MyFaces related projects",
- "name": "MyFaces"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310964",
- "id": "12310964",
- "key": "EXTSCRIPT",
- "name": "MyFaces Extensions Scripting",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310964&avatarId=10082",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310964&avatarId=10082",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310964&avatarId=10082",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310964&avatarId=10082"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10100",
- "id": "10100",
- "description": "Apache MyFaces related projects",
- "name": "MyFaces"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310821",
- "id": "12310821",
- "key": "EXTVAL",
- "name": "MyFaces Extensions Validator",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310821&avatarId=10080",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310821&avatarId=10080",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310821&avatarId=10080",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310821&avatarId=10080"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10100",
- "id": "10100",
- "description": "Apache MyFaces related projects",
- "name": "MyFaces"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311280",
- "id": "12311280",
- "key": "MFHTML5",
- "name": "MyFaces HTML5 Component Library",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311280&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311280&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311280&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311280&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10100",
- "id": "10100",
- "description": "Apache MyFaces related projects",
- "name": "MyFaces"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310691",
- "id": "12310691",
- "key": "ORCHESTRA",
- "name": "MyFaces Orchestra",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310691&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310691&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310691&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310691&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10100",
- "id": "10100",
- "description": "Apache MyFaces related projects",
- "name": "MyFaces"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310733",
- "id": "12310733",
- "key": "PORTLETBRIDGE",
- "name": "MyFaces Portlet Bridge",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310733&avatarId=10134",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310733&avatarId=10134",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310733&avatarId=10134",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310733&avatarId=10134"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10100",
- "id": "10100",
- "description": "Apache MyFaces related projects",
- "name": "MyFaces"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310951",
- "id": "12310951",
- "key": "MYFACESTEST",
- "name": "MyFaces Test",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310951&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310951&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310951&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310951&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10100",
- "id": "10100",
- "description": "Apache MyFaces related projects",
- "name": "MyFaces"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310273",
- "id": "12310273",
- "key": "TOBAGO",
- "name": "MyFaces Tobago",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310273&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310273&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310273&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310273&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10100",
- "id": "10100",
- "description": "Apache MyFaces related projects",
- "name": "MyFaces"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310272",
- "id": "12310272",
- "key": "TOMAHAWK",
- "name": "MyFaces Tomahawk",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310272&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310272&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310272&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310272&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10100",
- "id": "10100",
- "description": "Apache MyFaces related projects",
- "name": "MyFaces"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310661",
- "id": "12310661",
- "key": "TRINIDAD",
- "name": "MyFaces Trinidad",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310661&avatarId=10133",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310661&avatarId=10133",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310661&avatarId=10133",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310661&avatarId=10133"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10100",
- "id": "10100",
- "description": "Apache MyFaces related projects",
- "name": "MyFaces"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12319221",
- "id": "12319221",
- "key": "MYNEWT",
- "name": "Mynewt",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12319221&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12319221&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12319221&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12319221&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10024",
- "id": "10024",
- "description": "Incubator related projects",
- "name": "Incubator"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12320020",
- "id": "12320020",
- "key": "MYNEWTDOC",
- "name": "MyNewt Docs",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12320020&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12320020&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12320020&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12320020&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10024",
- "id": "10024",
- "description": "Incubator related projects",
- "name": "Incubator"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12318522",
- "id": "12318522",
- "key": "MYRIAD",
- "name": "Myriad",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12318522&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12318522&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12318522&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12318522&avatarId=10011"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311260",
- "id": "12311260",
- "key": "NEETHI",
- "name": "Neethi",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311260&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311260&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311260&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311260&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10028",
- "id": "10028",
- "description": "Web services projects",
- "name": "Web Services"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311182",
- "id": "12311182",
- "key": "NPANDAY",
- "name": "NPanday",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311182&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311182&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311182&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311182&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10024",
- "id": "10024",
- "description": "Incubator related projects",
- "name": "Incubator"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10680",
- "id": "10680",
- "key": "NUTCH",
- "name": "Nutch",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10680&avatarId=10426",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10680&avatarId=10426",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10680&avatarId=10426",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10680&avatarId=10426"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10430",
- "id": "10430",
- "description": "Apache Nutch",
- "name": "Nutch"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311151",
- "id": "12311151",
- "key": "NUVEM",
- "name": "Nuvem",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311151&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311151&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311151&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311151&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10024",
- "id": "10024",
- "description": "Incubator related projects",
- "name": "Incubator"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310270",
- "id": "12310270",
- "key": "ODE",
- "name": "ODE",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310270&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310270&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310270&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310270&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10311",
- "id": "10311",
- "description": "",
- "name": "ODE"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12314525",
- "id": "12314525",
- "key": "JACOB",
- "name": "ODE JaCOb",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12314525&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12314525&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12314525&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12314525&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10311",
- "id": "10311",
- "description": "",
- "name": "ODE"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12312220",
- "id": "12312220",
- "key": "ODFTOOLKIT",
- "name": "ODF Toolkit",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12312220&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12312220&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12312220&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12312220&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10024",
- "id": "10024",
- "description": "Incubator related projects",
- "name": "Incubator"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310500",
- "id": "12310500",
- "key": "OFBIZ",
- "name": "OFBiz",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310500&avatarId=18505",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310500&avatarId=18505",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310500&avatarId=18505",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310500&avatarId=18505"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10230",
- "id": "10230",
- "description": "The Open for Business Project",
- "name": "OFBiz"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10700",
- "id": "10700",
- "key": "OJB",
- "name": "OJB",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10700&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10700&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10700&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10700&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10220",
- "id": "10220",
- "description": "Retired projects - These projects have no active community and are read-only.",
- "name": "Retired"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12314520",
- "id": "12314520",
- "key": "OLINGO",
- "name": "Olingo",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12314520&avatarId=18497",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12314520&avatarId=18497",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12314520&avatarId=18497",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12314520&avatarId=18497"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/12460",
- "id": "12460",
- "description": "Olingo related projects",
- "name": "Olingo"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310839",
- "id": "12310839",
- "key": "OLIO",
- "name": "Olio",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310839&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310839&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310839&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310839&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10220",
- "id": "10220",
- "description": "Retired projects - These projects have no active community and are read-only.",
- "name": "Retired"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311070",
- "id": "12311070",
- "key": "OODT",
- "name": "OODT",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311070&avatarId=10109",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311070&avatarId=10109",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311070&avatarId=10109",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311070&avatarId=10109"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10483",
- "id": "10483",
- "description": "Apache OODT related",
- "name": "OODT"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311620",
- "id": "12311620",
- "key": "OOZIE",
- "name": "Oozie",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311620&avatarId=15903",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311620&avatarId=15903",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311620&avatarId=15903",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311620&avatarId=15903"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/11967",
- "id": "11967",
- "description": "Apache Oozie related projects",
- "name": "Oozie"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310943",
- "id": "12310943",
- "key": "ORP",
- "name": "Open Relevance Project ",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310943&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310943&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310943&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310943&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10150",
- "id": "10150",
- "description": "Lucene-related projects",
- "name": "Lucene"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310530",
- "id": "12310530",
- "key": "OPENEJB",
- "name": "OpenEJB",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310530&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310530&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310530&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310530&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10251",
- "id": "10251",
- "description": "",
- "name": "OpenEJB"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310834",
- "id": "12310834",
- "key": "OEP",
- "name": "OpenEJB Eclipse Plugin",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310834&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310834&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310834&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310834&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10251",
- "id": "10251",
- "description": "",
- "name": "OpenEJB"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310351",
- "id": "12310351",
- "key": "OPENJPA",
- "name": "OpenJPA",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310351&avatarId=10043",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310351&avatarId=10043",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310351&avatarId=10043",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310351&avatarId=10043"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10252",
- "id": "10252",
- "description": "",
- "name": "OpenJPA"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12312720",
- "id": "12312720",
- "key": "OPENMEETINGS",
- "name": "Openmeetings",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12312720&avatarId=10008",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12312720&avatarId=10008",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12312720&avatarId=10008",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12312720&avatarId=10008"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/11862",
- "id": "11862",
- "description": "Apache Openmeetings related",
- "name": "Openmeetings"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311215",
- "id": "12311215",
- "key": "OPENNLP",
- "name": "OpenNLP",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311215&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311215&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311215&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311215&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/11962",
- "id": "11962",
- "description": "Apache OpenNLP related projects",
- "name": "OpenNLP"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310844",
- "id": "12310844",
- "key": "OWB",
- "name": "OpenWebBeans",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310844&avatarId=10390",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310844&avatarId=10390",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310844&avatarId=10390",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310844&avatarId=10390"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10370",
- "id": "10370",
- "description": "OpenWebBeans Project",
- "name": "OpenWebBeans"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12318320",
- "id": "12318320",
- "key": "ORC",
- "name": "Orc",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12318320&avatarId=24648",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12318320&avatarId=24648",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12318320&avatarId=24648",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12318320&avatarId=24648"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/13361",
- "id": "13361",
- "description": "Apache Orc",
- "name": "Orc"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12315521",
- "id": "12315521",
- "key": "PARQUET",
- "name": "Parquet",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12315521&avatarId=20034",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12315521&avatarId=20034",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12315521&avatarId=20034",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12315521&avatarId=20034"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10024",
- "id": "10024",
- "description": "Incubator related projects",
- "name": "Incubator"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310760",
- "id": "12310760",
- "key": "PDFBOX",
- "name": "PDFBox",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310760&avatarId=11734",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310760&avatarId=11734",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310760&avatarId=11734",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310760&avatarId=11734"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10360",
- "id": "10360",
- "description": "Apache PDFBox projects",
- "name": "PDFBox"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12315120",
- "id": "12315120",
- "key": "PHOENIX",
- "name": "Phoenix",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12315120&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12315120&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12315120&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12315120&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/12560",
- "id": "12560",
- "description": "Apache Phoenix",
- "name": "Phoenix"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310832",
- "id": "12310832",
- "key": "PHOTARK",
- "name": "PhotArk",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310832&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310832&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310832&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310832&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10220",
- "id": "10220",
- "description": "Retired projects - These projects have no active community and are read-only.",
- "name": "Retired"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310730",
- "id": "12310730",
- "key": "PIG",
- "name": "Pig",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310730&avatarId=10934",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310730&avatarId=10934",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310730&avatarId=10934",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310730&avatarId=10934"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10292",
- "id": "10292",
- "description": "Scalable Distributed Computing",
- "name": "Hadoop"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310863",
- "id": "12310863",
- "key": "PIVOT",
- "name": "Pivot",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310863&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310863&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310863&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310863&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10382",
- "id": "10382",
- "description": "Pivot related projects",
- "name": "Pivot"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10560",
- "id": "10560",
- "key": "PLUTO",
- "name": "Pluto",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10560&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10560&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10560&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10560&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10050",
- "id": "10050",
- "description": "Apache Portals",
- "name": "Portals"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12312620",
- "id": "12312620",
- "key": "PODLINGNAMESEARCH",
- "name": "Podling Suitable Names Search",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12312620&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12312620&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12312620&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12312620&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10024",
- "id": "10024",
- "description": "Incubator related projects",
- "name": "Incubator"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10741",
- "id": "10741",
- "key": "PORTALS",
- "name": "Portals",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10741&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10741&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10741&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10741&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10050",
- "id": "10050",
- "description": "Apache Portals",
- "name": "Portals"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310870",
- "id": "12310870",
- "key": "APA",
- "name": "Portals Apps",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310870&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310870&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310870&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310870&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10050",
- "id": "10050",
- "description": "Apache Portals",
- "name": "Portals"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10720",
- "id": "10720",
- "key": "PB",
- "name": "Portals Bridges",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10720&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10720&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10720&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10720&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10050",
- "id": "10050",
- "description": "Apache Portals",
- "name": "Portals"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12314423",
- "id": "12314423",
- "key": "PROVISIONR",
- "name": "Provisionr",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12314423&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12314423&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12314423&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12314423&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10220",
- "id": "10220",
- "description": "Retired projects - These projects have no active community and are read-only.",
- "name": "Retired"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310837",
- "id": "12310837",
- "key": "PRC",
- "name": "Public Relations",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310837&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310837&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310837&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310837&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10130",
- "id": "10130",
- "description": "General ASF-wide projects.",
- "name": "Apache Software Foundation"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10613",
- "id": "10613",
- "key": "HERMES",
- "name": "Pubscribe",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10613&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10613&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10613&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10613&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10220",
- "id": "10220",
- "description": "Retired projects - These projects have no active community and are read-only.",
- "name": "Retired"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310871",
- "id": "12310871",
- "key": "PYLUCENE",
- "name": "PyLucene",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310871&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310871&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310871&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310871&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10150",
- "id": "10150",
- "description": "Lucene-related projects",
- "name": "Lucene"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310520",
- "id": "12310520",
- "key": "QPID",
- "name": "Qpid",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310520&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310520&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310520&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310520&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10450",
- "id": "10450",
- "description": "Apache Qpid and related projects",
- "name": "Qpid"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12315321",
- "id": "12315321",
- "key": "DISPATCH",
- "name": "Qpid Dispatch",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12315321&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12315321&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12315321&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12315321&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10450",
- "id": "10450",
- "description": "Apache Qpid and related projects",
- "name": "Qpid"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12314524",
- "id": "12314524",
- "key": "QPIDJMS",
- "name": "Qpid JMS",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12314524&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12314524&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12314524&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12314524&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10450",
- "id": "10450",
- "description": "Apache Qpid and related projects",
- "name": "Qpid"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12313720",
- "id": "12313720",
- "key": "PROTON",
- "name": "Qpid Proton",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12313720&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12313720&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12313720&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12313720&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10450",
- "id": "10450",
- "description": "Apache Qpid and related projects",
- "name": "Qpid"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12319623",
- "id": "12319623",
- "key": "QUARKS",
- "name": "Quarks",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12319623&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12319623&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12319623&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12319623&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10024",
- "id": "10024",
- "description": "Incubator related projects",
- "name": "Incubator"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310610",
- "id": "12310610",
- "key": "RAMPART",
- "name": "Rampart",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310610&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310610&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310610&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310610&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10401",
- "id": "10401",
- "description": "Axis and Axis2 related projects",
- "name": "Axis"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310630",
- "id": "12310630",
- "key": "RAMPARTC",
- "name": "Rampart/C",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310630&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310630&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310630&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310630&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10401",
- "id": "10401",
- "description": "Axis and Axis2 related projects",
- "name": "Axis"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12315621",
- "id": "12315621",
- "key": "RANGER",
- "name": "Ranger",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12315621&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12315621&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12315621&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12315621&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10024",
- "id": "10024",
- "description": "Incubator related projects",
- "name": "Incubator"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311290",
- "id": "12311290",
- "key": "RAVE",
- "name": "Rave",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311290&avatarId=10353",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311290&avatarId=10353",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311290&avatarId=10353",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311290&avatarId=10353"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12315820",
- "id": "12315820",
- "key": "REEF",
- "name": "REEF",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12315820&avatarId=24672",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12315820&avatarId=24672",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12315820&avatarId=24672",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12315820&avatarId=24672"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/13862",
- "id": "13862",
- "description": "Apache REEF project",
- "name": "REEF"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310600",
- "id": "12310600",
- "key": "RIVER",
- "name": "River",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310600&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310600&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310600&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310600&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10530",
- "id": "10530",
- "description": "Apache River",
- "name": "River"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12319020",
- "id": "12319020",
- "key": "RYA",
- "name": "Rya",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12319020&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12319020&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12319020&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12319020&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10024",
- "id": "10024",
- "description": "Incubator related projects",
- "name": "Incubator"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12319320",
- "id": "12319320",
- "key": "S2GRAPH",
- "name": "S2Graph",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12319320&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12319320&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12319320&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12319320&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10024",
- "id": "10024",
- "description": "Incubator related projects",
- "name": "Incubator"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12316220",
- "id": "12316220",
- "key": "SAMOA",
- "name": "SAMOA",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12316220&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12316220&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12316220&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12316220&avatarId=10011"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12314526",
- "id": "12314526",
- "key": "SAMZA",
- "name": "Samza",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12314526&avatarId=17163",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12314526&avatarId=17163",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12314526&avatarId=17163",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12314526&avatarId=17163"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/12960",
- "id": "12960",
- "description": "Apache Samza",
- "name": "Samza"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310062",
- "id": "12310062",
- "key": "SAND",
- "name": "Sandesha",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310062&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310062&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310062&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310062&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10401",
- "id": "10401",
- "description": "Axis and Axis2 related projects",
- "name": "Axis"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310190",
- "id": "12310190",
- "key": "SANDESHA2",
- "name": "Sandesha2",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310190&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310190&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310190&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310190&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10401",
- "id": "10401",
- "description": "Axis and Axis2 related projects",
- "name": "Axis"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310540",
- "id": "12310540",
- "key": "SANDESHA2C",
- "name": "Sandesha2/C",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310540&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310540&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310540&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310540&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10220",
- "id": "10220",
- "description": "Retired projects - These projects have no active community and are read-only.",
- "name": "Retired"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311231",
- "id": "12311231",
- "key": "SANTUARIO",
- "name": "Santuario",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311231&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311231&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311231&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311231&avatarId=10011"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310622",
- "id": "12310622",
- "key": "SAVAN",
- "name": "Savan",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310622&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310622&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310622&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310622&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10220",
- "id": "10220",
- "description": "Retired projects - These projects have no active community and are read-only.",
- "name": "Retired"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10610",
- "id": "10610",
- "key": "SCOUT",
- "name": "Scout",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10610&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10610&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10610&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10610&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10471",
- "id": "10471",
- "description": "jUDDI related projects",
- "name": "jUDDI"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12314720",
- "id": "12314720",
- "key": "SENTRY",
- "name": "Sentry",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12314720&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12314720&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12314720&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12314720&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/14060",
- "id": "14060",
- "description": "Apache Sentry",
- "name": "Sentry"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12318820",
- "id": "12318820",
- "key": "SERF",
- "name": "serf",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12318820&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12318820&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12318820&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12318820&avatarId=10011"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311206",
- "id": "12311206",
- "key": "SM",
- "name": "ServiceMix",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311206&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311206&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311206&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311206&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10480",
- "id": "10480",
- "description": "ServiceMix Enterprise Service Bus",
- "name": "ServiceMix"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311208",
- "id": "12311208",
- "key": "SMX4",
- "name": "ServiceMix 4",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311208&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311208&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311208&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311208&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10480",
- "id": "10480",
- "description": "ServiceMix Enterprise Service Bus",
- "name": "ServiceMix"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311212",
- "id": "12311212",
- "key": "SMXCOMP",
- "name": "ServiceMix Components",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311212&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311212&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311212&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311212&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10480",
- "id": "10480",
- "description": "ServiceMix Enterprise Service Bus",
- "name": "ServiceMix"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311205",
- "id": "12311205",
- "key": "SMX4KNL",
- "name": "ServiceMix Kernel",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311205&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311205&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311205&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311205&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10220",
- "id": "10220",
- "description": "Retired projects - These projects have no active community and are read-only.",
- "name": "Retired"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311209",
- "id": "12311209",
- "key": "SMX4NMR",
- "name": "ServiceMix NMR",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311209&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311209&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311209&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311209&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10480",
- "id": "10480",
- "description": "ServiceMix Enterprise Service Bus",
- "name": "ServiceMix"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311040",
- "id": "12311040",
- "key": "SHALE",
- "name": "Shale",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311040&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311040&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311040&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311040&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10220",
- "id": "10220",
- "description": "Retired projects - These projects have no active community and are read-only.",
- "name": "Retired"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310741",
- "id": "12310741",
- "key": "SHINDIG",
- "name": "Shindig",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310741&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310741&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310741&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310741&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10400",
- "id": "10400",
- "description": "Shindig related projects",
- "name": "Shindig"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310950",
- "id": "12310950",
- "key": "SHIRO",
- "name": "Shiro",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310950&avatarId=10099",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310950&avatarId=10099",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310950&avatarId=10099",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310950&avatarId=10099"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10460",
- "id": "10460",
- "description": "Apache Shiro related projects",
- "name": "Shiro"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12316720",
- "id": "12316720",
- "key": "SINGA",
- "name": "Singa",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12316720&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12316720&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12316720&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12316720&avatarId=10011"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12314925",
- "id": "12314925",
- "key": "SIRONA",
- "name": "Sirona",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12314925&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12314925&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12314925&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12314925&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10024",
- "id": "10024",
- "description": "Incubator related projects",
- "name": "Incubator"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12315422",
- "id": "12315422",
- "key": "SLIDER",
- "name": "Slider",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12315422&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12315422&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12315422&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12315422&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10024",
- "id": "10024",
- "description": "Incubator related projects",
- "name": "Incubator"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310710",
- "id": "12310710",
- "key": "SLING",
- "name": "Sling",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310710&avatarId=10034",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310710&avatarId=10034",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310710&avatarId=10034",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310710&avatarId=10034"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10340",
- "id": "10340",
- "description": "Apache Sling related projects",
- "name": "Sling"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10459",
- "id": "10459",
- "key": "SOAP",
- "name": "SOAP",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10459&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10459&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10459&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10459&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10220",
- "id": "10220",
- "description": "Retired projects - These projects have no active community and are read-only.",
- "name": "Retired"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310230",
- "id": "12310230",
- "key": "SOLR",
- "name": "Solr",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310230&avatarId=22151",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310230&avatarId=22151",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310230&avatarId=22151",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310230&avatarId=22151"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10150",
- "id": "10150",
- "description": "Lucene-related projects",
- "name": "Lucene"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12315420",
- "id": "12315420",
- "key": "SPARK",
- "name": "Spark",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12315420&avatarId=19440",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12315420&avatarId=19440",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12315420&avatarId=19440",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12315420&avatarId=19440"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311072",
- "id": "12311072",
- "key": "SIS",
- "name": "Spatial Information Systems",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311072&avatarId=17542",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311072&avatarId=17542",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311072&avatarId=17542",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311072&avatarId=17542"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10024",
- "id": "10024",
- "description": "Incubator related projects",
- "name": "Incubator"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311320",
- "id": "12311320",
- "key": "SQOOP",
- "name": "Sqoop",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311320&avatarId=12143",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311320&avatarId=12143",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311320&avatarId=12143",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311320&avatarId=12143"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10960",
- "id": "10960",
- "description": "Sqoop related projects",
- "name": "Sqoop"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311214",
- "id": "12311214",
- "key": "STANBOL",
- "name": "Stanbol",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311214&avatarId=16330",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311214&avatarId=16330",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311214&avatarId=16330",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311214&avatarId=16330"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10024",
- "id": "10024",
- "description": "Incubator related projects",
- "name": "Incubator"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12315220",
- "id": "12315220",
- "key": "STEVE",
- "name": "Steve",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12315220&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12315220&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12315220&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12315220&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/12260",
- "id": "12260",
- "description": "Apache STeVe",
- "name": "Steve"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311203",
- "id": "12311203",
- "key": "STOMP",
- "name": "Stomp Specification",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311203&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311203&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311203&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311203&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/11160",
- "id": "11160",
- "description": "ActiveMQ",
- "name": "ActiveMQ"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310845",
- "id": "12310845",
- "key": "STONEHENGE",
- "name": "Stonehenge",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310845&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310845&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310845&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310845&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10220",
- "id": "10220",
- "description": "Retired projects - These projects have no active community and are read-only.",
- "name": "Retired"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12314521",
- "id": "12314521",
- "key": "STRATOS",
- "name": "Stratos",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12314521&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12314521&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12314521&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12314521&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/12760",
- "id": "12760",
- "description": "Apache Stratos related",
- "name": "Stratos"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12314320",
- "id": "12314320",
- "key": "STREAMS",
- "name": "Streams",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12314320&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12314320&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12314320&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12314320&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10024",
- "id": "10024",
- "description": "Incubator related projects",
- "name": "Incubator"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311043",
- "id": "12311043",
- "key": "STR",
- "name": "Struts 1",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311043&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311043&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311043&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311043&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10380",
- "id": "10380",
- "description": "struts.apache.org",
- "name": "Struts Framework"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311041",
- "id": "12311041",
- "key": "WW",
- "name": "Struts 2",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311041&avatarId=22177",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311041&avatarId=22177",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311041&avatarId=22177",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311041&avatarId=22177"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10380",
- "id": "10380",
- "description": "struts.apache.org",
- "name": "Struts Framework"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311044",
- "id": "12311044",
- "key": "SB",
- "name": "Struts Sandbox",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311044&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311044&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311044&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311044&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10380",
- "id": "10380",
- "description": "struts.apache.org",
- "name": "Struts Framework"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311045",
- "id": "12311045",
- "key": "SITE",
- "name": "Struts Shared Resources",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311045&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311045&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311045&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311045&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10380",
- "id": "10380",
- "description": "struts.apache.org",
- "name": "Struts Framework"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12318828",
- "id": "12318828",
- "key": "SVN",
- "name": "Subversion",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12318828&avatarId=24978",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12318828&avatarId=24978",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12318828&avatarId=24978",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12318828&avatarId=24978"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310201",
- "id": "12310201",
- "key": "SYNAPSE",
- "name": "Synapse",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310201&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310201&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310201&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310201&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10291",
- "id": "10291",
- "description": "Apache Synapse ESB related projects",
- "name": "Synapse"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12313120",
- "id": "12313120",
- "key": "SYNCOPE",
- "name": "Syncope",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12313120&avatarId=13444",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12313120&avatarId=13444",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12313120&avatarId=13444",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12313120&avatarId=13444"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/11760",
- "id": "11760",
- "description": "Syncope related projects",
- "name": "Syncope"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12319522",
- "id": "12319522",
- "key": "SYSTEMML",
- "name": "SystemML",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12319522&avatarId=25877",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12319522&avatarId=25877",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12319522&avatarId=25877",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12319522&avatarId=25877"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10024",
- "id": "10024",
- "description": "Incubator related projects",
- "name": "Incubator"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12314424",
- "id": "12314424",
- "key": "TAJO",
- "name": "Tajo",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12314424&avatarId=17015",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12314424&avatarId=17015",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12314424&avatarId=17015",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12314424&avatarId=17015"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12315925",
- "id": "12315925",
- "key": "TAMAYA",
- "name": "Tamaya",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12315925&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12315925&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12315925&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12315925&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10024",
- "id": "10024",
- "description": "Incubator related projects",
- "name": "Incubator"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10573",
- "id": "10573",
- "key": "TAPESTRY",
- "name": "Tapestry",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10573&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10573&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10573&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10573&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10190",
- "id": "10190",
- "description": "Apache Tapestry related projects",
- "name": "Tapestry"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310833",
- "id": "12310833",
- "key": "TAP5",
- "name": "Tapestry 5",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310833&avatarId=10038",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310833&avatarId=10038",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310833&avatarId=10038",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310833&avatarId=10038"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10190",
- "id": "10190",
- "description": "Apache Tapestry related projects",
- "name": "Tapestry"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310841",
- "id": "12310841",
- "key": "TASHI",
- "name": "Tashi",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310841&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310841&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310841&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310841&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10024",
- "id": "10024",
- "description": "Incubator related projects",
- "name": "Incubator"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12320123",
- "id": "12320123",
- "key": "TEPHRA",
- "name": "Tephra",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12320123&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12320123&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12320123&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12320123&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10024",
- "id": "10024",
- "description": "Incubator related projects",
- "name": "Incubator"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10650",
- "id": "10650",
- "key": "TST",
- "name": "Test Project",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10650&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10650&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10650&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10650&avatarId=10011"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12316125",
- "id": "12316125",
- "key": "TESTY",
- "name": "testy",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12316125&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12316125&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12316125&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12316125&avatarId=10011"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310651",
- "id": "12310651",
- "key": "TEXEN",
- "name": "Texen",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310651&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310651&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310651&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310651&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10200",
- "id": "10200",
- "description": "Apache Velocity related projects",
- "name": "Velocity"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310800",
- "id": "12310800",
- "key": "THRIFT",
- "name": "Thrift",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310800&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310800&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310800&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310800&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10470",
- "id": "10470",
- "description": "Thrift related projects",
- "name": "Thrift"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310631",
- "id": "12310631",
- "key": "TIKA",
- "name": "Tika",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310631&avatarId=10111",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310631&avatarId=10111",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310631&avatarId=10111",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310631&avatarId=10111"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10431",
- "id": "10431",
- "description": "Apache Tika",
- "name": "Tika"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311046",
- "id": "12311046",
- "key": "TILES",
- "name": "Tiles",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311046&avatarId=12539",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311046&avatarId=12539",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311046&avatarId=12539",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311046&avatarId=12539"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10381",
- "id": "10381",
- "description": "tiles.apache.org",
- "name": "Tiles Framework"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311422",
- "id": "12311422",
- "key": "AUTOTAG",
- "name": "Tiles Autotag",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311422&avatarId=12540",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311422&avatarId=12540",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311422&avatarId=12540",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311422&avatarId=12540"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10381",
- "id": "10381",
- "description": "tiles.apache.org",
- "name": "Tiles Framework"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311423",
- "id": "12311423",
- "key": "TEVAL",
- "name": "Tiles Eval",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311423&avatarId=12542",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311423&avatarId=12542",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311423&avatarId=12542",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311423&avatarId=12542"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10381",
- "id": "10381",
- "description": "tiles.apache.org",
- "name": "Tiles Framework"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311421",
- "id": "12311421",
- "key": "TREQ",
- "name": "Tiles Request",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311421&avatarId=12541",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311421&avatarId=12541",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311421&avatarId=12541",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311421&avatarId=12541"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10381",
- "id": "10381",
- "description": "tiles.apache.org",
- "name": "Tiles Framework"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311049",
- "id": "12311049",
- "key": "TILESSB",
- "name": "Tiles Sandbox",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311049&avatarId=12543",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311049&avatarId=12543",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311049&avatarId=12543",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311049&avatarId=12543"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10381",
- "id": "10381",
- "description": "tiles.apache.org",
- "name": "Tiles Framework"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311048",
- "id": "12311048",
- "key": "TILESSHARED",
- "name": "Tiles Shared Resources",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311048&avatarId=13430",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311048&avatarId=13430",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311048&avatarId=13430",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311048&avatarId=13430"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10381",
- "id": "10381",
- "description": "tiles.apache.org",
- "name": "Tiles Framework"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311047",
- "id": "12311047",
- "key": "TILESSHOW",
- "name": "Tiles Showcase",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311047&avatarId=13431",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311047&avatarId=13431",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311047&avatarId=13431",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311047&avatarId=13431"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10381",
- "id": "10381",
- "description": "tiles.apache.org",
- "name": "Tiles Framework"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12316520",
- "id": "12316520",
- "key": "TINKERPOP",
- "name": "TinkerPop",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12316520&avatarId=23137",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12316520&avatarId=23137",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12316520&avatarId=23137",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12316520&avatarId=23137"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12312320",
- "id": "12312320",
- "key": "TOMEE",
- "name": "TomEE",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12312320&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12312320&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12312320&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12312320&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10251",
- "id": "10251",
- "description": "",
- "name": "OpenEJB"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12314322",
- "id": "12314322",
- "key": "TATPI",
- "name": "TomEE Arquillian Test Porting Initiative",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12314322&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12314322&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12314322&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12314322&avatarId=10011"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12319524",
- "id": "12319524",
- "key": "TOREE",
- "name": "TOREE",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12319524&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12319524&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12319524&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12319524&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10024",
- "id": "10024",
- "description": "Incubator related projects",
- "name": "Incubator"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310280",
- "id": "12310280",
- "key": "TORQUE",
- "name": "Torque",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310280&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310280&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310280&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310280&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10090",
- "id": "10090",
- "description": "DB related projects",
- "name": "DB"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310542",
- "id": "12310542",
- "key": "TORQUEOLD",
- "name": "Torque issues (old)",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310542&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310542&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310542&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310542&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10090",
- "id": "10090",
- "description": "DB related projects",
- "name": "DB"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310963",
- "id": "12310963",
- "key": "TS",
- "name": "Traffic Server",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310963&avatarId=16231",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310963&avatarId=16231",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310963&avatarId=16231",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310963&avatarId=16231"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/11360",
- "id": "11360",
- "description": "Traffic Server related",
- "name": "Traffic Server"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310592",
- "id": "12310592",
- "key": "DIRTSEC",
- "name": "Triplesec",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310592&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310592&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310592&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310592&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10041",
- "id": "10041",
- "description": "Apache Directory Project Category",
- "name": "Directory"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310620",
- "id": "12310620",
- "key": "TRIPLES",
- "name": "TripleSoup",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310620&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310620&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310620&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310620&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10220",
- "id": "10220",
- "description": "Retired projects - These projects have no active community and are read-only.",
- "name": "Retired"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10742",
- "id": "10742",
- "key": "TSIK",
- "name": "TSIK",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10742&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10742&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10742&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10742&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10220",
- "id": "10220",
- "description": "Retired projects - These projects have no active community and are read-only.",
- "name": "Retired"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310090",
- "id": "12310090",
- "key": "TRB",
- "name": "Turbine",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310090&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310090&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310090&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310090&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10250",
- "id": "10250",
- "description": "",
- "name": "Turbine"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310210",
- "id": "12310210",
- "key": "TUSCANY",
- "name": "Tuscany",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310210&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310210&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310210&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310210&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10472",
- "id": "10472",
- "description": "",
- "name": "Tuscany"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310570",
- "id": "12310570",
- "key": "UIMA",
- "name": "UIMA",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310570&avatarId=17532",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310570&avatarId=17532",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310570&avatarId=17532",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310570&avatarId=17532"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10410",
- "id": "10410",
- "description": "Apache UIMA",
- "name": "UIMA"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12314923",
- "id": "12314923",
- "key": "USERGRID",
- "name": "Usergrid",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12314923&avatarId=25971",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12314923&avatarId=25971",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12314923&avatarId=25971",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12314923&avatarId=25971"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/13561",
- "id": "13561",
- "description": "Apache Usergrid",
- "name": "Usergrid"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310840",
- "id": "12310840",
- "key": "VCL",
- "name": "VCL",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310840&avatarId=15052",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310840&avatarId=15052",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310840&avatarId=15052",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310840&avatarId=15052"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10024",
- "id": "10024",
- "description": "Incubator related projects",
- "name": "Incubator"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310104",
- "id": "12310104",
- "key": "VELOCITY",
- "name": "Velocity",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310104&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310104&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310104&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310104&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10200",
- "id": "10200",
- "description": "Apache Velocity related projects",
- "name": "Velocity"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311130",
- "id": "12311130",
- "key": "VELOCITYSB",
- "name": "Velocity Sandbox",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311130&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311130&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311130&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311130&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10200",
- "id": "10200",
- "description": "Apache Velocity related projects",
- "name": "Velocity"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310130",
- "id": "12310130",
- "key": "VELTOOLS",
- "name": "Velocity Tools",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310130&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310130&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310130&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310130&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10200",
- "id": "10200",
- "description": "Apache Velocity related projects",
- "name": "Velocity"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310960",
- "id": "12310960",
- "key": "VXQUERY",
- "name": "VXQuery",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310960&avatarId=22416",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310960&avatarId=22416",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310960&avatarId=22416",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310960&avatarId=22416"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310930",
- "id": "12310930",
- "key": "VYSPER",
- "name": "VYSPER",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310930&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310930&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310930&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310930&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10240",
- "id": "10240",
- "description": "MINA related projects",
- "name": "MINA"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310220",
- "id": "12310220",
- "key": "WADI",
- "name": "WADI",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310220&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310220&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310220&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310220&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10220",
- "id": "10220",
- "description": "Retired projects - These projects have no active community and are read-only.",
- "name": "Retired"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311240",
- "id": "12311240",
- "key": "WAVE",
- "name": "Wave",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311240&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311240&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311240&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311240&avatarId=10011"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12318723",
- "id": "12318723",
- "key": "WHIMSY",
- "name": "Whimsy",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12318723&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12318723&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12318723&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12318723&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/13560",
- "id": "13560",
- "description": "Apache Whimsy Project",
- "name": "Whimsy"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310561",
- "id": "12310561",
- "key": "WICKET",
- "name": "Wicket",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310561&avatarId=17538",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310561&avatarId=17538",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310561&avatarId=17538",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310561&avatarId=17538"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10270",
- "id": "10270",
- "description": "Apache Wicket related projects",
- "name": "Wicket"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310940",
- "id": "12310940",
- "key": "WINK",
- "name": "Wink",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310940&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310940&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310940&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310940&avatarId=10011"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10710",
- "id": "10710",
- "key": "WODEN",
- "name": "Woden",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10710&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10710&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10710&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10710&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10028",
- "id": "10028",
- "description": "Web services projects",
- "name": "Web Services"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310961",
- "id": "12310961",
- "key": "WOOKIE",
- "name": "Wookie",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310961&avatarId=13438",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310961&avatarId=13438",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310961&avatarId=13438",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310961&avatarId=13438"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10024",
- "id": "10024",
- "description": "Incubator related projects",
- "name": "Incubator"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310250",
- "id": "12310250",
- "key": "WSCOMMONS",
- "name": "WS-Commons",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310250&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310250&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310250&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310250&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10028",
- "id": "10028",
- "description": "Web services projects",
- "name": "Web Services"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10612",
- "id": "10612",
- "key": "APOLLO",
- "name": "WSRF",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10612&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10612&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10612&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10612&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10220",
- "id": "10220",
- "description": "Retired projects - These projects have no active community and are read-only.",
- "name": "Retired"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10464",
- "id": "10464",
- "key": "WSRP4J",
- "name": "WSRP4J",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10464&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10464&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10464&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10464&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10220",
- "id": "10220",
- "description": "Retired projects - These projects have no active community and are read-only.",
- "name": "Retired"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310063",
- "id": "12310063",
- "key": "WSS",
- "name": "WSS4J",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310063&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310063&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310063&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310063&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10028",
- "id": "10028",
- "description": "Web services projects",
- "name": "Web Services"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10582",
- "id": "10582",
- "key": "XALANC",
- "name": "XalanC",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10582&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10582&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10582&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10582&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/11460",
- "id": "11460",
- "description": "Xalan Related Projects",
- "name": "Xalan"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10584",
- "id": "10584",
- "key": "XALANJ",
- "name": "XalanJ2",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10584&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10584&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10584&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10584&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/11460",
- "id": "11460",
- "description": "Xalan Related Projects",
- "name": "Xalan"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310502",
- "id": "12310502",
- "key": "XAP",
- "name": "XAP",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310502&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310502&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310502&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310502&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10220",
- "id": "10220",
- "description": "Retired projects - These projects have no active community and are read-only.",
- "name": "Retired"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310312",
- "id": "12310312",
- "key": "XBEAN",
- "name": "XBean",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310312&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310312&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310312&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310312&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10061",
- "id": "10061",
- "description": "Apache J2EE project",
- "name": "Geronimo"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10510",
- "id": "10510",
- "key": "XERCESC",
- "name": "Xerces-C++",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10510&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10510&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10510&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10510&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10040",
- "id": "10040",
- "description": "XML related projects",
- "name": "XML"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310241",
- "id": "12310241",
- "key": "XERCESP",
- "name": "Xerces-P",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310241&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310241&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310241&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310241&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10040",
- "id": "10040",
- "description": "XML related projects",
- "name": "XML"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10520",
- "id": "10520",
- "key": "XERCESJ",
- "name": "Xerces2-J",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10520&avatarId=10036",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10520&avatarId=10036",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10520&avatarId=10036",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10520&avatarId=10036"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10040",
- "id": "10040",
- "description": "XML related projects",
- "name": "XML"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311230",
- "id": "12311230",
- "key": "XMLCOMMONS",
- "name": "XML Commons",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311230&avatarId=10037",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311230&avatarId=10037",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311230&avatarId=10037",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311230&avatarId=10037"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10040",
- "id": "10040",
- "description": "XML related projects",
- "name": "XML"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10465",
- "id": "10465",
- "key": "XMLRPC",
- "name": "XML-RPC",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10465&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10465&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10465&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10465&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10220",
- "id": "10220",
- "description": "Retired projects - These projects have no active community and are read-only.",
- "name": "Retired"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/10436",
- "id": "10436",
- "key": "XMLBEANS",
- "name": "XMLBeans",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=10436&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=10436&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=10436&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=10436&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10110",
- "id": "10110",
- "description": "XMLBeans related projects",
- "name": "XMLBeans"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12314221",
- "id": "12314221",
- "key": "XGC",
- "name": "XMLGraphicsCommons",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12314221&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12314221&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12314221&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12314221&avatarId=10011"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311191",
- "id": "12311191",
- "key": "XMLSCHEMA",
- "name": "XmlSchema",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311191&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311191&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311191&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311191&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10028",
- "id": "10028",
- "description": "Web services projects",
- "name": "Web Services"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12313523",
- "id": "12313523",
- "key": "XW",
- "name": "XWork",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12313523&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12313523&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12313523&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12313523&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10380",
- "id": "10380",
- "description": "struts.apache.org",
- "name": "Struts Framework"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12318920",
- "id": "12318920",
- "key": "YETUS",
- "name": "Yetus",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12318920&avatarId=25263",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12318920&avatarId=25263",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12318920&avatarId=25263",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12318920&avatarId=25263"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/13660",
- "id": "13660",
- "description": "Apache Yetus project",
- "name": "Yetus"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310271",
- "id": "12310271",
- "key": "YOKO",
- "name": "Yoko - CORBA Server",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310271&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310271&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310271&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310271&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10061",
- "id": "10061",
- "description": "Apache J2EE project",
- "name": "Geronimo"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12316221",
- "id": "12316221",
- "key": "ZEPPELIN",
- "name": "Zeppelin",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12316221&avatarId=27110",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12316221&avatarId=27110",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12316221&avatarId=27110",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12316221&avatarId=27110"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10024",
- "id": "10024",
- "description": "Incubator related projects",
- "name": "Incubator"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12316820",
- "id": "12316820",
- "key": "ZEST",
- "name": "Zest",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12316820&avatarId=24602",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12316820&avatarId=24602",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12316820&avatarId=24602",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12316820&avatarId=24602"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12311131",
- "id": "12311131",
- "key": "ZETACOMP",
- "name": "Zeta Components",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12311131&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12311131&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12311131&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12311131&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10024",
- "id": "10024",
- "description": "Incubator related projects",
- "name": "Incubator"
- }
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310801",
- "id": "12310801",
- "key": "ZOOKEEPER",
- "name": "ZooKeeper",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310801&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310801&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310801&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310801&avatarId=10011"
- },
- "projectCategory": {
- "self": "https://issues.apache.org/jira/rest/api/2/projectCategory/10484",
- "id": "10484",
- "description": "Apache ZooKeeper related",
- "name": "ZooKeeper"
- }
- }
-]
\ No newline at end of file
diff --git a/vendor/src/github.com/andygrunwald/go-jira/mocks/issues_in_sprint.json b/vendor/src/github.com/andygrunwald/go-jira/mocks/issues_in_sprint.json
deleted file mode 100644
index adc42bc..0000000
--- a/vendor/src/github.com/andygrunwald/go-jira/mocks/issues_in_sprint.json
+++ /dev/null
@@ -1,115 +0,0 @@
-{
- "expand": "schema,names",
- "startAt": 0,
- "maxResults": 50,
- "total": 10,
- "issues": [
- {
- "expand": "operations,versionedRepresentations,editmeta,changelog,renderedFields",
- "id": "12338",
- "self": "https://example.atlassian.net/rest/agile/1.0/issue/12338",
- "key": "AR-86",
- "fields": {
- "issuetype": {
- "self": "https://example.atlassian.net/rest/api/2/issuetype/3",
- "id": "3",
- "description": "A task that needs to be done.",
- "iconUrl": "https://example.atlassian.net/secure/viewavatar?size=xsmall&avatarId=10418&avatarType=issuetype",
- "name": "Task",
- "subtask": false,
- "avatarId": 10418
- },
- "timespent": null,
- "project": {
- "self": "https://example.atlassian.net/rest/api/2/project/10302",
- "id": "10302",
- "key": "AR",
- "name": "Team Argon",
- "avatarUrls": {
- "48x48": "https://example.atlassian.net/secure/projectavatar?pid=10302&avatarId=10610",
- "24x24": "https://example.atlassian.net/secure/projectavatar?size=small&pid=10302&avatarId=10610",
- "16x16": "https://example.atlassian.net/secure/projectavatar?size=xsmall&pid=10302&avatarId=10610",
- "32x32": "https://example.atlassian.net/secure/projectavatar?size=medium&pid=10302&avatarId=10610"
- }
- },
- "fixVersions": [],
- "customfield_11200": "0|0zzzzd:vi",
- "aggregatetimespent": null,
- "resolution": {
- "self": "https://example.atlassian.net/rest/api/2/resolution/6",
- "id": "6",
- "description": "",
- "name": "Done"
- },
- "customfield_11401": null,
- "customfield_11400": null,
- "customfield_10105": 13.0,
- "customfield_10700": "AR-37",
- "resolutiondate": "2015-12-07T14:19:13.000-0800",
- "workratio": -1,
- "lastViewed": null,
- "watches": {
- "self": "https://example.atlassian.net/rest/api/2/issue/AR-86/watchers",
- "watchCount": 2,
- "isWatching": true
- },
- "created": "2015-12-02T07:39:15.000-0800",
- "epic": {
- "id": 11900,
- "key": "AR-37",
- "self": "https://example.atlassian.net/rest/agile/1.0/epic/11900",
- "name": "Moderation: Design",
- "summary": "Moderation design",
- "color": {
- "key": "color_8"
- },
- "done": true
- },
- "priority": {
- "self": "https://example.atlassian.net/rest/api/2/priority/3",
- "iconUrl": "https://example.atlassian.net/images/icons/priorities/major.svg",
- "name": "Major",
- "id": "3"
- },
- "customfield_10102": null,
- "customfield_10103": null,
- "labels": [],
- "customfield_11700": null,
- "timeestimate": null,
- "aggregatetimeoriginalestimate": null,
- "versions": [],
- "issuelinks": [],
- "assignee": {
- "self": "https://example.atlassian.net/rest/api/2/user?username=mister.morris",
- "name": "mister.morris",
- "key": "mister.morris",
- "emailAddress": "mister.morris@uservoice.com",
- "avatarUrls": {
- "48x48": "https://example.atlassian.net/secure/useravatar?ownerId=mister.morris&avatarId=10604",
- "24x24": "https://example.atlassian.net/secure/useravatar?size=small&ownerId=mister.morris&avatarId=10604",
- "16x16": "https://example.atlassian.net/secure/useravatar?size=xsmall&ownerId=mister.morris&avatarId=10604",
- "32x32": "https://example.atlassian.net/secure/useravatar?size=medium&ownerId=mister.morris&avatarId=10604"
- },
- "displayName": "mister Morris",
- "active": true,
- "timeZone": "America/New_York"
- },
- "updated": "2016-02-01T08:17:04.000-0800",
- "status": {
- "self": "https://example.atlassian.net/rest/api/2/status/10000",
- "description": "Ready to move to dev team for grooming",
- "iconUrl": "https://example.atlassian.net/images/icons/statuses/closed.png",
- "name": "Ready",
- "id": "10000",
- "statusCategory": {
- "self": "https://example.atlassian.net/rest/api/2/statuscategory/2",
- "id": 2,
- "key": "new",
- "colorName": "blue-gray",
- "name": "To Do"
- }
- }
- }
- }
- ]
-}
\ No newline at end of file
diff --git a/vendor/src/github.com/andygrunwald/go-jira/mocks/project.json b/vendor/src/github.com/andygrunwald/go-jira/mocks/project.json
deleted file mode 100644
index 9658221..0000000
--- a/vendor/src/github.com/andygrunwald/go-jira/mocks/project.json
+++ /dev/null
@@ -1,411 +0,0 @@
-{
- "expand": "projectKeys",
- "self": "https://issues.apache.org/jira/rest/api/2/project/12310505",
- "id": "12310505",
- "key": "ABDERA",
- "description": "The Abdera project is an implementation of the Atom Syndication Format (RFC4287) and the Atom Publishing Protocol specifications published by the IETF Atompub working group.",
- "lead": {
- "self": "https://issues.apache.org/jira/rest/api/2/user?username=rooneg",
- "key": "rooneg",
- "name": "rooneg",
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/useravatar?avatarId=10452",
- "24x24": "https://issues.apache.org/jira/secure/useravatar?size=small&avatarId=10452",
- "16x16": "https://issues.apache.org/jira/secure/useravatar?size=xsmall&avatarId=10452",
- "32x32": "https://issues.apache.org/jira/secure/useravatar?size=medium&avatarId=10452"
- },
- "displayName": "Garrett Rooney",
- "active": true
- },
- "components": [],
- "issueTypes": [
- {
- "self": "https://issues.apache.org/jira/rest/api/2/issuetype/1",
- "id": "1",
- "description": "A problem which impairs or prevents the functions of the product.",
- "iconUrl": "https://issues.apache.org/jira/images/icons/issuetypes/bug.png",
- "name": "Bug",
- "subtask": false
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/issuetype/2",
- "id": "2",
- "description": "A new feature of the product, which has yet to be developed.",
- "iconUrl": "https://issues.apache.org/jira/images/icons/issuetypes/newfeature.png",
- "name": "New Feature",
- "subtask": false
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/issuetype/4",
- "id": "4",
- "description": "An improvement or enhancement to an existing feature or task.",
- "iconUrl": "https://issues.apache.org/jira/images/icons/issuetypes/improvement.png",
- "name": "Improvement",
- "subtask": false
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/issuetype/6",
- "id": "6",
- "description": "A new unit, integration or system test.",
- "iconUrl": "https://issues.apache.org/jira/images/icons/issuetypes/requirement.png",
- "name": "Test",
- "subtask": false
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/issuetype/5",
- "id": "5",
- "description": "General wishlist item.",
- "iconUrl": "https://issues.apache.org/jira/images/icons/issuetypes/improvement.png",
- "name": "Wish",
- "subtask": false
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/issuetype/3",
- "id": "3",
- "description": "A task that needs to be done.",
- "iconUrl": "https://issues.apache.org/jira/images/icons/issuetypes/task.png",
- "name": "Task",
- "subtask": false
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/issuetype/7",
- "id": "7",
- "description": "The sub-task of the issue",
- "iconUrl": "https://issues.apache.org/jira/images/icons/issuetypes/subtask_alternate.png",
- "name": "Sub-task",
- "subtask": true
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/issuetype/8",
- "id": "8",
- "description": "A request for a new JIRA project to be set up",
- "iconUrl": "https://issues.apache.org/jira/images/icons/jiraman18.gif",
- "name": "New JIRA Project",
- "subtask": false
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/issuetype/9",
- "id": "9",
- "description": "An RTC request",
- "iconUrl": "https://issues.apache.org/jira/images/icons/issuetypes/newfeature.png",
- "name": "RTC",
- "subtask": false
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/issuetype/10",
- "id": "10",
- "description": "Challenges made against the Sun Compatibility Test Suite",
- "iconUrl": "https://issues.apache.org/jira/images/icons/issuetypes/task.png",
- "name": "TCK Challenge",
- "subtask": false
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/issuetype/11",
- "id": "11",
- "description": "A formal question. Initially added for the Legal JIRA.",
- "iconUrl": "https://issues.apache.org/jira/images/icons/issuetypes/genericissue.png",
- "name": "Question",
- "subtask": false
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/issuetype/12",
- "id": "12",
- "description": "",
- "iconUrl": "https://issues.apache.org/jira/images/icons/issuetypes/genericissue.png",
- "name": "Temp",
- "subtask": false
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/issuetype/13",
- "id": "13",
- "description": "A place to record back and forth on notions not yet formed enough to make a 'New Feature' or 'Task'",
- "iconUrl": "https://issues.apache.org/jira/images/icons/issuetypes/genericissue.png",
- "name": "Brainstorming",
- "subtask": false
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/issuetype/14",
- "id": "14",
- "description": "An overarching type made of sub-tasks",
- "iconUrl": "https://issues.apache.org/jira/images/icons/issuetypes/genericissue.png",
- "name": "Umbrella",
- "subtask": false
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/issuetype/15",
- "id": "15",
- "description": "Created by JIRA Agile - do not edit or delete. Issue type for a big user story that needs to be broken down.",
- "iconUrl": "https://issues.apache.org/jira/images/icons/issuetypes/epic.png",
- "name": "Epic",
- "subtask": false
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/issuetype/16",
- "id": "16",
- "description": "Created by JIRA Agile - do not edit or delete. Issue type for a user story.",
- "iconUrl": "https://issues.apache.org/jira/images/icons/issuetypes/story.png",
- "name": "Story",
- "subtask": false
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/issuetype/17",
- "id": "17",
- "description": "A technical task.",
- "iconUrl": "https://issues.apache.org/jira/images/icons/issuetypes/task_agile.png",
- "name": "Technical task",
- "subtask": true
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/issuetype/18",
- "id": "18",
- "description": "Upgrading a dependency to a newer version",
- "iconUrl": "https://issues.apache.org/jira/images/icons/issuetypes/improvement.png",
- "name": "Dependency upgrade",
- "subtask": false
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/issuetype/19",
- "id": "19",
- "description": "A search for a suitable name for an Apache product",
- "iconUrl": "https://issues.apache.org/jira/images/icons/issuetypes/requirement.png",
- "name": "Suitable Name Search",
- "subtask": false
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/issuetype/20",
- "id": "20",
- "description": "Documentation or Website",
- "iconUrl": "https://issues.apache.org/jira/images/icons/issuetypes/documentation.png",
- "name": "Documentation",
- "subtask": false
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/issuetype/10000",
- "id": "10000",
- "description": "Assigned specifically to Contractors by the VP Infra or or other VP/ Board Member.",
- "iconUrl": "https://issues.apache.org/jira/images/icons/issuetypes/sales.png",
- "name": "Planned Work",
- "subtask": false
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/issuetype/10100",
- "id": "10100",
- "description": "A request for a new Confluence Wiki to be set up",
- "iconUrl": "https://issues.apache.org/jira/secure/viewavatar?size=xsmall&avatarId=23211&avatarType=issuetype",
- "name": "New Confluence Wiki",
- "subtask": false
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/issuetype/10200",
- "id": "10200",
- "description": "",
- "iconUrl": "https://issues.apache.org/jira/secure/viewavatar?size=xsmall&avatarId=21140&avatarType=issuetype",
- "name": "New Git Repo",
- "subtask": false
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/issuetype/10201",
- "id": "10201",
- "description": "",
- "iconUrl": "https://issues.apache.org/jira/secure/viewavatar?size=xsmall&avatarId=21130&avatarType=issuetype",
- "name": "Github Integration",
- "subtask": false
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/issuetype/10202",
- "id": "10202",
- "description": "",
- "iconUrl": "https://issues.apache.org/jira/secure/viewavatar?size=xsmall&avatarId=21130&avatarType=issuetype",
- "name": "New TLP ",
- "subtask": false
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/issuetype/10204",
- "id": "10204",
- "description": "",
- "iconUrl": "https://issues.apache.org/jira/secure/viewavatar?size=xsmall&avatarId=21130&avatarType=issuetype",
- "name": "New TLP - Common Tasks",
- "subtask": false
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/issuetype/10205",
- "id": "10205",
- "description": "",
- "iconUrl": "https://issues.apache.org/jira/secure/viewavatar?size=xsmall&avatarId=21134&avatarType=issuetype",
- "name": "SVN->GIT Migration",
- "subtask": false
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/issuetype/10206",
- "id": "10206",
- "description": "",
- "iconUrl": "https://issues.apache.org/jira/secure/viewavatar?size=xsmall&avatarId=21130&avatarType=issuetype",
- "name": "Blog - New Blog Request",
- "subtask": false
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/issuetype/10207",
- "id": "10207",
- "description": "",
- "iconUrl": "https://issues.apache.org/jira/secure/viewavatar?size=xsmall&avatarId=21130&avatarType=issuetype",
- "name": "Blogs - New Blog User Account Request",
- "subtask": false
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/issuetype/10208",
- "id": "10208",
- "description": "",
- "iconUrl": "https://issues.apache.org/jira/secure/viewavatar?size=xsmall&avatarId=21130&avatarType=issuetype",
- "name": "Blogs - Access to Existing Blog",
- "subtask": false
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/issuetype/10209",
- "id": "10209",
- "description": "",
- "iconUrl": "https://issues.apache.org/jira/secure/viewavatar?size=xsmall&avatarId=21130&avatarType=issuetype",
- "name": "New Bugzilla Project",
- "subtask": false
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/issuetype/10210",
- "id": "10210",
- "description": "",
- "iconUrl": "https://issues.apache.org/jira/secure/viewavatar?size=xsmall&avatarId=21130&avatarType=issuetype",
- "name": "SVN->GIT Mirroring",
- "subtask": false
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/issuetype/10300",
- "id": "10300",
- "description": "For general IT problems and questions. Created by JIRA Service Desk.",
- "iconUrl": "https://issues.apache.org/jira/servicedesk/issue-type-icons?icon=it-help",
- "name": "IT Help",
- "subtask": false
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/issuetype/10301",
- "id": "10301",
- "description": "For new system accounts or passwords. Created by JIRA Service Desk.",
- "iconUrl": "https://issues.apache.org/jira/servicedesk/issue-type-icons?icon=access",
- "name": "Access",
- "subtask": false
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/issuetype/10400",
- "id": "10400",
- "description": "",
- "iconUrl": "https://issues.apache.org/jira/images/icons/issuetypes/genericissue.png",
- "name": "Request",
- "subtask": false
- }
- ],
- "url": "http://abdera.apache.org",
- "assigneeType": "UNASSIGNED",
- "versions": [
- {
- "self": "https://issues.apache.org/jira/rest/api/2/version/12312506",
- "id": "12312506",
- "name": "0.2.2",
- "archived": false,
- "released": true,
- "releaseDate": "2007-02-19",
- "userReleaseDate": "19/Feb/07",
- "projectId": 12310505
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/version/12312507",
- "id": "12312507",
- "name": "0.3.0",
- "archived": false,
- "released": true,
- "releaseDate": "2007-10-05",
- "userReleaseDate": "05/Oct/07",
- "projectId": 12310505
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/version/12312825",
- "id": "12312825",
- "name": "0.4.0",
- "archived": false,
- "released": true,
- "releaseDate": "2008-04-11",
- "userReleaseDate": "11/Apr/08",
- "projectId": 12310505
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/version/12313105",
- "id": "12313105",
- "name": "1.0",
- "archived": false,
- "released": true,
- "releaseDate": "2010-05-02",
- "userReleaseDate": "02/May/10",
- "projectId": 12310505
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/version/12314990",
- "id": "12314990",
- "name": "1.1",
- "archived": false,
- "released": true,
- "releaseDate": "2010-07-11",
- "userReleaseDate": "11/Jul/10",
- "projectId": 12310505
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/version/12315922",
- "id": "12315922",
- "name": "1.1.1",
- "archived": false,
- "released": true,
- "releaseDate": "2010-12-06",
- "userReleaseDate": "06/Dec/10",
- "projectId": 12310505
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/version/12316041",
- "id": "12316041",
- "name": "1.1.2",
- "archived": false,
- "released": true,
- "releaseDate": "2011-01-15",
- "userReleaseDate": "15/Jan/11",
- "projectId": 12310505
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/version/12323557",
- "id": "12323557",
- "name": "1.1.3",
- "archived": false,
- "released": false,
- "projectId": 12310505
- },
- {
- "self": "https://issues.apache.org/jira/rest/api/2/version/12316141",
- "id": "12316141",
- "name": "1.2",
- "archived": false,
- "released": false,
- "projectId": 12310505
- }
- ],
- "name": "Abdera",
- "roles": {
- "Service Desk Team": "https://issues.apache.org/jira/rest/api/2/project/12310505/role/10251",
- "Developers": "https://issues.apache.org/jira/rest/api/2/project/12310505/role/10050",
- "Service Desk Customers": "https://issues.apache.org/jira/rest/api/2/project/12310505/role/10250",
- "Contributors": "https://issues.apache.org/jira/rest/api/2/project/12310505/role/10010",
- "PMC": "https://issues.apache.org/jira/rest/api/2/project/12310505/role/10011",
- "Committers": "https://issues.apache.org/jira/rest/api/2/project/12310505/role/10001",
- "Administrators": "https://issues.apache.org/jira/rest/api/2/project/12310505/role/10002",
- "ASF Members": "https://issues.apache.org/jira/rest/api/2/project/12310505/role/10150",
- "Users": "https://issues.apache.org/jira/rest/api/2/project/12310505/role/10040"
- },
- "avatarUrls": {
- "48x48": "https://issues.apache.org/jira/secure/projectavatar?pid=12310505&avatarId=10011",
- "24x24": "https://issues.apache.org/jira/secure/projectavatar?size=small&pid=12310505&avatarId=10011",
- "16x16": "https://issues.apache.org/jira/secure/projectavatar?size=xsmall&pid=12310505&avatarId=10011",
- "32x32": "https://issues.apache.org/jira/secure/projectavatar?size=medium&pid=12310505&avatarId=10011"
- }
-}
\ No newline at end of file
diff --git a/vendor/src/github.com/andygrunwald/go-jira/mocks/sprints.json b/vendor/src/github.com/andygrunwald/go-jira/mocks/sprints.json
deleted file mode 100644
index 508ca35..0000000
--- a/vendor/src/github.com/andygrunwald/go-jira/mocks/sprints.json
+++ /dev/null
@@ -1,46 +0,0 @@
-{
- "isLast": true,
- "maxResults": 50,
- "startAt": 0,
- "values": [
- {
- "completeDate": "2016-04-28T05:08:48.543-07:00",
- "endDate": "2016-04-27T08:29:00.000-07:00",
- "id": 740,
- "name": "Iteration-10",
- "originBoardId": 734,
- "self": "https://jira.com/rest/agile/1.0/sprint/740",
- "startDate": "2016-04-11T07:29:03.294-07:00",
- "state": "closed"
- },
- {
- "completeDate": "2016-05-30T02:45:44.991-07:00",
- "endDate": "2016-05-26T14:56:00.000-07:00",
- "id": 776,
- "name": "Iteration-12-1",
- "originBoardId": 734,
- "self": "https://jira.com/rest/agile/1.0/sprint/776",
- "startDate": "2016-05-19T13:56:00.000-07:00",
- "state": "closed"
- },
- {
- "completeDate": "2016-06-08T07:54:13.723-07:00",
- "endDate": "2016-06-08T01:06:00.000-07:00",
- "id": 807,
- "name": "Iteration-12-2",
- "originBoardId": 734,
- "self": "https://jira.com/rest/agile/1.0/sprint/807",
- "startDate": "2016-06-01T00:06:00.000-07:00",
- "state": "closed"
- },
- {
- "endDate": "2016-06-28T14:24:00.000-07:00",
- "id": 832,
- "name": "Iteration-13-2",
- "originBoardId": 734,
- "self": "https://jira.com/rest/agile/1.0/sprint/832",
- "startDate": "2016-06-20T13:24:39.161-07:00",
- "state": "active"
- }
- ]
-}
diff --git a/vendor/src/github.com/andygrunwald/go-jira/mocks/transitions.json b/vendor/src/github.com/andygrunwald/go-jira/mocks/transitions.json
deleted file mode 100644
index d21f409..0000000
--- a/vendor/src/github.com/andygrunwald/go-jira/mocks/transitions.json
+++ /dev/null
@@ -1,101 +0,0 @@
-{
- "expand": "transitions",
- "transitions": [
- {
- "id": "2",
- "name": "Close Issue",
- "to": {
- "self": "http://localhost:8090/jira/rest/api/2.0/status/10000",
- "description": "The issue is currently being worked on.",
- "iconUrl": "http://localhost:8090/jira/images/icons/progress.gif",
- "name": "In Progress",
- "id": "10000",
- "statusCategory": {
- "self": "http://localhost:8090/jira/rest/api/2.0/statuscategory/1",
- "id": 1,
- "key": "in-flight",
- "colorName": "yellow",
- "name": "In Progress"
- }
- },
- "fields": {
- "summary": {
- "required": false,
- "schema": {
- "type": "array",
- "items": "option",
- "custom": "com.atlassian.jira.plugin.system.customfieldtypes:multiselect",
- "customId": 10001
- },
- "name": "My Multi Select",
- "hasDefaultValue": false,
- "operations": [
- "set",
- "add"
- ],
- "allowedValues": [
- "red",
- "blue"
- ]
- }
- }
- },
- {
- "id": "711",
- "name": "QA Review",
- "to": {
- "self": "http://localhost:8090/jira/rest/api/2.0/status/5",
- "description": "The issue is closed.",
- "iconUrl": "http://localhost:8090/jira/images/icons/closed.gif",
- "name": "Closed",
- "id": "5",
- "statusCategory": {
- "self": "http://localhost:8090/jira/rest/api/2.0/statuscategory/9",
- "id": 9,
- "key": "completed",
- "colorName": "green"
- }
- },
- "fields": {
- "summary": {
- "required": false,
- "schema": {
- "type": "array",
- "items": "option",
- "custom": "com.atlassian.jira.plugin.system.customfieldtypes:multiselect",
- "customId": 10001
- },
- "name": "My Multi Select",
- "hasDefaultValue": false,
- "operations": [
- "set",
- "add"
- ],
- "allowedValues": [
- "red",
- "blue"
- ]
- },
- "colour": {
- "required": false,
- "schema": {
- "type": "array",
- "items": "option",
- "custom": "com.atlassian.jira.plugin.system.customfieldtypes:multiselect",
- "customId": 10001
- },
- "name": "My Multi Select",
- "hasDefaultValue": false,
- "operations": [
- "set",
- "add"
- ],
- "allowedValues": [
- "red",
- "blue"
- ]
- }
- }
- }
- ]
-}
diff --git a/vendor/src/github.com/andygrunwald/go-jira/project.go b/vendor/src/github.com/andygrunwald/go-jira/project.go
deleted file mode 100644
index 77b7e15..0000000
--- a/vendor/src/github.com/andygrunwald/go-jira/project.go
+++ /dev/null
@@ -1,120 +0,0 @@
-package jira
-
-import (
- "fmt"
-)
-
-// ProjectService handles projects for the JIRA instance / API.
-//
-// JIRA API docs: https://docs.atlassian.com/jira/REST/latest/#api/2/project
-type ProjectService struct {
- client *Client
-}
-
-// ProjectList represent a list of Projects
-type ProjectList []struct {
- Expand string `json:"expand"`
- Self string `json:"self"`
- ID string `json:"id"`
- Key string `json:"key"`
- Name string `json:"name"`
- AvatarUrls AvatarUrls `json:"avatarUrls"`
- ProjectTypeKey string `json:"projectTypeKey"`
- ProjectCategory ProjectCategory `json:"projectCategory,omitempty"`
-}
-
-// ProjectCategory represents a single project category
-type ProjectCategory struct {
- Self string `json:"self"`
- ID string `json:"id"`
- Name string `json:"name"`
- Description string `json:"description"`
-}
-
-// Project represents a JIRA Project.
-type Project struct {
- Expand string `json:"expand,omitempty"`
- Self string `json:"self,omitempty"`
- ID string `json:"id,omitempty"`
- Key string `json:"key,omitempty"`
- Description string `json:"description,omitempty"`
- Lead User `json:"lead,omitempty"`
- Components []ProjectComponent `json:"components,omitempty"`
- IssueTypes []IssueType `json:"issueTypes,omitempty"`
- URL string `json:"url,omitempty"`
- Email string `json:"email,omitempty"`
- AssigneeType string `json:"assigneeType,omitempty"`
- Versions []Version `json:"versions,omitempty"`
- Name string `json:"name,omitempty"`
- Roles struct {
- Developers string `json:"Developers,omitempty"`
- } `json:"roles,omitempty"`
- AvatarUrls AvatarUrls `json:"avatarUrls,omitempty"`
- ProjectCategory ProjectCategory `json:"projectCategory,omitempty"`
-}
-
-// Version represents a single release version of a project
-type Version struct {
- Self string `json:"self"`
- ID string `json:"id"`
- Name string `json:"name"`
- Archived bool `json:"archived"`
- Released bool `json:"released"`
- ReleaseDate string `json:"releaseDate"`
- UserReleaseDate string `json:"userReleaseDate"`
- ProjectID int `json:"projectId"` // Unlike other IDs, this is returned as a number
-}
-
-// ProjectComponent represents a single component of a project
-type ProjectComponent struct {
- Self string `json:"self"`
- ID string `json:"id"`
- Name string `json:"name"`
- Description string `json:"description"`
- Lead User `json:"lead"`
- AssigneeType string `json:"assigneeType"`
- Assignee User `json:"assignee"`
- RealAssigneeType string `json:"realAssigneeType"`
- RealAssignee User `json:"realAssignee"`
- IsAssigneeTypeValid bool `json:"isAssigneeTypeValid"`
- Project string `json:"project"`
- ProjectID int `json:"projectId"`
-}
-
-// GetList gets all projects form JIRA
-//
-// JIRA API docs: https://docs.atlassian.com/jira/REST/latest/#api/2/project-getAllProjects
-func (s *ProjectService) GetList() (*ProjectList, *Response, error) {
- apiEndpoint := "rest/api/2/project"
- req, err := s.client.NewRequest("GET", apiEndpoint, nil)
- if err != nil {
- return nil, nil, err
- }
-
- projectList := new(ProjectList)
- resp, err := s.client.Do(req, projectList)
- if err != nil {
- return nil, resp, err
- }
- return projectList, resp, nil
-}
-
-// Get returns a full representation of the project for the given issue key.
-// JIRA will attempt to identify the project by the projectIdOrKey path parameter.
-// This can be an project id, or an project key.
-//
-// JIRA API docs: https://docs.atlassian.com/jira/REST/latest/#api/2/project-getProject
-func (s *ProjectService) Get(projectID string) (*Project, *Response, error) {
- apiEndpoint := fmt.Sprintf("/rest/api/2/project/%s", projectID)
- req, err := s.client.NewRequest("GET", apiEndpoint, nil)
- if err != nil {
- return nil, nil, err
- }
-
- project := new(Project)
- resp, err := s.client.Do(req, project)
- if err != nil {
- return nil, resp, err
- }
- return project, resp, nil
-}
diff --git a/vendor/src/github.com/andygrunwald/go-jira/project_test.go b/vendor/src/github.com/andygrunwald/go-jira/project_test.go
deleted file mode 100644
index f2d53d0..0000000
--- a/vendor/src/github.com/andygrunwald/go-jira/project_test.go
+++ /dev/null
@@ -1,80 +0,0 @@
-package jira
-
-import (
- "fmt"
- "io/ioutil"
- "net/http"
- "testing"
-)
-
-func TestProjectService_GetList(t *testing.T) {
- setup()
- defer teardown()
- testAPIEdpoint := "/rest/api/2/project"
-
- raw, err := ioutil.ReadFile("./mocks/all_projects.json")
- if err != nil {
- t.Error(err.Error())
- }
- testMux.HandleFunc(testAPIEdpoint, func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testRequestURL(t, r, testAPIEdpoint)
- fmt.Fprint(w, string(raw))
- })
-
- projects, _, err := testClient.Project.GetList()
- if projects == nil {
- t.Error("Expected project list. Project list is nil")
- }
- if err != nil {
- t.Errorf("Error given: %s", err)
- }
-}
-
-func TestProjectService_Get(t *testing.T) {
- setup()
- defer teardown()
- testAPIEdpoint := "/rest/api/2/project/12310505"
-
- raw, err := ioutil.ReadFile("./mocks/project.json")
- if err != nil {
- t.Error(err.Error())
- }
- testMux.HandleFunc(testAPIEdpoint, func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testRequestURL(t, r, testAPIEdpoint)
- fmt.Fprint(w, string(raw))
- })
-
- projects, _, err := testClient.Project.Get("12310505")
- if projects == nil {
- t.Error("Expected project list. Project list is nil")
- }
- if err != nil {
- t.Errorf("Error given: %s", err)
- }
-}
-
-func TestProjectService_Get_NoProject(t *testing.T) {
- setup()
- defer teardown()
- testAPIEdpoint := "/rest/api/2/project/99999999"
-
- testMux.HandleFunc(testAPIEdpoint, func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testRequestURL(t, r, testAPIEdpoint)
- fmt.Fprint(w, nil)
- })
-
- projects, resp, err := testClient.Project.Get("99999999")
- if projects != nil {
- t.Errorf("Expected nil. Got %+v", projects)
- }
-
- if resp.Status == "404" {
- t.Errorf("Expected status 404. Got %s", resp.Status)
- }
- if err == nil {
- t.Errorf("Error given: %s", err)
- }
-}
diff --git a/vendor/src/github.com/andygrunwald/go-jira/sprint.go b/vendor/src/github.com/andygrunwald/go-jira/sprint.go
deleted file mode 100644
index a101eb9..0000000
--- a/vendor/src/github.com/andygrunwald/go-jira/sprint.go
+++ /dev/null
@@ -1,60 +0,0 @@
-package jira
-
-import (
- "fmt"
-)
-
-// SprintService handles sprints in JIRA Agile API.
-// See https://docs.atlassian.com/jira-software/REST/cloud/
-type SprintService struct {
- client *Client
-}
-
-// IssuesWrapper represents a wrapper struct for moving issues to sprint
-type IssuesWrapper struct {
- Issues []string `json:"issues"`
-}
-
-// IssuesInSprintResult represents a wrapper struct for search result
-type IssuesInSprintResult struct {
- Issues []Issue `json:"issues"`
-}
-
-// MoveIssuesToSprint moves issues to a sprint, for a given sprint Id.
-// Issues can only be moved to open or active sprints.
-// The maximum number of issues that can be moved in one operation is 50.
-//
-// JIRA API docs: https://docs.atlassian.com/jira-software/REST/cloud/#agile/1.0/sprint-moveIssuesToSprint
-func (s *SprintService) MoveIssuesToSprint(sprintID int, issueIDs []string) (*Response, error) {
- apiEndpoint := fmt.Sprintf("rest/agile/1.0/sprint/%d/issue", sprintID)
-
- payload := IssuesWrapper{Issues: issueIDs}
-
- req, err := s.client.NewRequest("POST", apiEndpoint, payload)
-
- if err != nil {
- return nil, err
- }
-
- resp, err := s.client.Do(req, nil)
- return resp, err
-}
-
-// GetIssuesForSprint returns all issues in a sprint, for a given sprint Id.
-// This only includes issues that the user has permission to view.
-// By default, the returned issues are ordered by rank.
-//
-// JIRA API Docs: https://docs.atlassian.com/jira-software/REST/cloud/#agile/1.0/sprint-getIssuesForSprint
-func (s *SprintService) GetIssuesForSprint(sprintID int) ([]Issue, *Response, error) {
- apiEndpoint := fmt.Sprintf("rest/agile/1.0/sprint/%d/issue", sprintID)
-
- req, err := s.client.NewRequest("GET", apiEndpoint, nil)
-
- if err != nil {
- return nil, nil, err
- }
-
- result := new(IssuesInSprintResult)
- resp, err := s.client.Do(req, result)
- return result.Issues, resp, err
-}
diff --git a/vendor/src/github.com/andygrunwald/go-jira/sprint_test.go b/vendor/src/github.com/andygrunwald/go-jira/sprint_test.go
deleted file mode 100644
index 40f0546..0000000
--- a/vendor/src/github.com/andygrunwald/go-jira/sprint_test.go
+++ /dev/null
@@ -1,67 +0,0 @@
-package jira
-
-import (
- "encoding/json"
- "fmt"
- "io/ioutil"
- "net/http"
- "testing"
-)
-
-func TestSprintService_MoveIssuesToSprint(t *testing.T) {
- setup()
- defer teardown()
-
- testAPIEndpoint := "/rest/agile/1.0/sprint/123/issue"
-
- issuesToMove := []string{"KEY-1", "KEY-2"}
-
- testMux.HandleFunc(testAPIEndpoint, func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "POST")
- testRequestURL(t, r, testAPIEndpoint)
-
- decoder := json.NewDecoder(r.Body)
- var payload IssuesWrapper
- err := decoder.Decode(&payload)
- if err != nil {
- t.Errorf("Got error: %v", err)
- }
-
- if payload.Issues[0] != issuesToMove[0] {
- t.Errorf("Expected %s to be in payload, got %s instead", issuesToMove[0], payload.Issues[0])
- }
- })
- _, err := testClient.Sprint.MoveIssuesToSprint(123, issuesToMove)
-
- if err != nil {
- t.Errorf("Got error: %v", err)
- }
-}
-
-func TestSprintService_GetIssuesForSprint(t *testing.T) {
- setup()
- defer teardown()
- testAPIEdpoint := "/rest/agile/1.0/sprint/123/issue"
-
- raw, err := ioutil.ReadFile("./mocks/issues_in_sprint.json")
- if err != nil {
- t.Error(err.Error())
- }
- testMux.HandleFunc(testAPIEdpoint, func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testRequestURL(t, r, testAPIEdpoint)
- fmt.Fprint(w, string(raw))
- })
-
- issues, _, err := testClient.Sprint.GetIssuesForSprint(123)
- if err != nil {
- t.Errorf("Error given: %v", err)
- }
- if issues == nil {
- t.Error("Expected issues in sprint list. Issues list is nil")
- }
- if len(issues) != 1 {
- t.Errorf("Expect there to be 1 issue in the sprint, found %v", len(issues))
- }
-
-}
diff --git a/vendor/src/github.com/beorn7/perks/quantile/bench_test.go b/vendor/src/github.com/beorn7/perks/quantile/bench_test.go
deleted file mode 100644
index 0bd0e4e..0000000
--- a/vendor/src/github.com/beorn7/perks/quantile/bench_test.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package quantile
-
-import (
- "testing"
-)
-
-func BenchmarkInsertTargeted(b *testing.B) {
- b.ReportAllocs()
-
- s := NewTargeted(Targets)
- b.ResetTimer()
- for i := float64(0); i < float64(b.N); i++ {
- s.Insert(i)
- }
-}
-
-func BenchmarkInsertTargetedSmallEpsilon(b *testing.B) {
- s := NewTargeted(TargetsSmallEpsilon)
- b.ResetTimer()
- for i := float64(0); i < float64(b.N); i++ {
- s.Insert(i)
- }
-}
-
-func BenchmarkInsertBiased(b *testing.B) {
- s := NewLowBiased(0.01)
- b.ResetTimer()
- for i := float64(0); i < float64(b.N); i++ {
- s.Insert(i)
- }
-}
-
-func BenchmarkInsertBiasedSmallEpsilon(b *testing.B) {
- s := NewLowBiased(0.0001)
- b.ResetTimer()
- for i := float64(0); i < float64(b.N); i++ {
- s.Insert(i)
- }
-}
-
-func BenchmarkQuery(b *testing.B) {
- s := NewTargeted(Targets)
- for i := float64(0); i < 1e6; i++ {
- s.Insert(i)
- }
- b.ResetTimer()
- n := float64(b.N)
- for i := float64(0); i < n; i++ {
- s.Query(i / n)
- }
-}
-
-func BenchmarkQuerySmallEpsilon(b *testing.B) {
- s := NewTargeted(TargetsSmallEpsilon)
- for i := float64(0); i < 1e6; i++ {
- s.Insert(i)
- }
- b.ResetTimer()
- n := float64(b.N)
- for i := float64(0); i < n; i++ {
- s.Query(i / n)
- }
-}
diff --git a/vendor/src/github.com/beorn7/perks/quantile/example_test.go b/vendor/src/github.com/beorn7/perks/quantile/example_test.go
deleted file mode 100644
index ab3293a..0000000
--- a/vendor/src/github.com/beorn7/perks/quantile/example_test.go
+++ /dev/null
@@ -1,121 +0,0 @@
-// +build go1.1
-
-package quantile_test
-
-import (
- "bufio"
- "fmt"
- "log"
- "os"
- "strconv"
- "time"
-
- "github.com/beorn7/perks/quantile"
-)
-
-func Example_simple() {
- ch := make(chan float64)
- go sendFloats(ch)
-
- // Compute the 50th, 90th, and 99th percentile.
- q := quantile.NewTargeted(map[float64]float64{
- 0.50: 0.005,
- 0.90: 0.001,
- 0.99: 0.0001,
- })
- for v := range ch {
- q.Insert(v)
- }
-
- fmt.Println("perc50:", q.Query(0.50))
- fmt.Println("perc90:", q.Query(0.90))
- fmt.Println("perc99:", q.Query(0.99))
- fmt.Println("count:", q.Count())
- // Output:
- // perc50: 5
- // perc90: 16
- // perc99: 223
- // count: 2388
-}
-
-func Example_mergeMultipleStreams() {
- // Scenario:
- // We have multiple database shards. On each shard, there is a process
- // collecting query response times from the database logs and inserting
- // them into a Stream (created via NewTargeted(0.90)), much like the
- // Simple example. These processes expose a network interface for us to
- // ask them to serialize and send us the results of their
- // Stream.Samples so we may Merge and Query them.
- //
- // NOTES:
- // * These sample sets are small, allowing us to get them
- // across the network much faster than sending the entire list of data
- // points.
- //
- // * For this to work correctly, we must supply the same quantiles
- // a priori the process collecting the samples supplied to NewTargeted,
- // even if we do not plan to query them all here.
- ch := make(chan quantile.Samples)
- getDBQuerySamples(ch)
- q := quantile.NewTargeted(map[float64]float64{0.90: 0.001})
- for samples := range ch {
- q.Merge(samples)
- }
- fmt.Println("perc90:", q.Query(0.90))
-}
-
-func Example_window() {
- // Scenario: We want the 90th, 95th, and 99th percentiles for each
- // minute.
-
- ch := make(chan float64)
- go sendStreamValues(ch)
-
- tick := time.NewTicker(1 * time.Minute)
- q := quantile.NewTargeted(map[float64]float64{
- 0.90: 0.001,
- 0.95: 0.0005,
- 0.99: 0.0001,
- })
- for {
- select {
- case t := <-tick.C:
- flushToDB(t, q.Samples())
- q.Reset()
- case v := <-ch:
- q.Insert(v)
- }
- }
-}
-
-func sendStreamValues(ch chan float64) {
- // Use your imagination
-}
-
-func flushToDB(t time.Time, samples quantile.Samples) {
- // Use your imagination
-}
-
-// This is a stub for the above example. In reality this would hit the remote
-// servers via http or something like it.
-func getDBQuerySamples(ch chan quantile.Samples) {}
-
-func sendFloats(ch chan<- float64) {
- f, err := os.Open("exampledata.txt")
- if err != nil {
- log.Fatal(err)
- }
- sc := bufio.NewScanner(f)
- for sc.Scan() {
- b := sc.Bytes()
- v, err := strconv.ParseFloat(string(b), 64)
- if err != nil {
- log.Fatal(err)
- }
- ch <- v
- }
- if sc.Err() != nil {
- log.Fatal(sc.Err())
- }
- close(ch)
-}
diff --git a/vendor/src/github.com/beorn7/perks/quantile/exampledata.txt b/vendor/src/github.com/beorn7/perks/quantile/exampledata.txt
deleted file mode 100644
index 1602287..0000000
--- a/vendor/src/github.com/beorn7/perks/quantile/exampledata.txt
+++ /dev/null
@@ -1,2388 +0,0 @@
-8
-5
-26
-12
-5
-235
-13
-6
-28
-30
-3
-3
-3
-3
-5
-2
-33
-7
-2
-4
-7
-12
-14
-5
-8
-3
-10
-4
-5
-3
-6
-6
-209
-20
-3
-10
-14
-3
-4
-6
-8
-5
-11
-7
-3
-2
-3
-3
-212
-5
-222
-4
-10
-10
-5
-6
-3
-8
-3
-10
-254
-220
-2
-3
-5
-24
-5
-4
-222
-7
-3
-3
-223
-8
-15
-12
-14
-14
-3
-2
-2
-3
-13
-3
-11
-4
-4
-6
-5
-7
-13
-5
-3
-5
-2
-5
-3
-5
-2
-7
-15
-17
-14
-3
-6
-6
-3
-17
-5
-4
-7
-6
-4
-4
-8
-6
-8
-3
-9
-3
-6
-3
-4
-5
-3
-3
-660
-4
-6
-10
-3
-6
-3
-2
-5
-13
-2
-4
-4
-10
-4
-8
-4
-3
-7
-9
-9
-3
-10
-37
-3
-13
-4
-12
-3
-6
-10
-8
-5
-21
-2
-3
-8
-3
-2
-3
-3
-4
-12
-2
-4
-8
-8
-4
-3
-2
-20
-1
-6
-32
-2
-11
-6
-18
-3
-8
-11
-3
-212
-3
-4
-2
-6
-7
-12
-11
-3
-2
-16
-10
-6
-4
-6
-3
-2
-7
-3
-2
-2
-2
-2
-5
-6
-4
-3
-10
-3
-4
-6
-5
-3
-4
-4
-5
-6
-4
-3
-4
-4
-5
-7
-5
-5
-3
-2
-7
-2
-4
-12
-4
-5
-6
-2
-4
-4
-8
-4
-15
-13
-7
-16
-5
-3
-23
-5
-5
-7
-3
-2
-9
-8
-7
-5
-8
-11
-4
-10
-76
-4
-47
-4
-3
-2
-7
-4
-2
-3
-37
-10
-4
-2
-20
-5
-4
-4
-10
-10
-4
-3
-7
-23
-240
-7
-13
-5
-5
-3
-3
-2
-5
-4
-2
-8
-7
-19
-2
-23
-8
-7
-2
-5
-3
-8
-3
-8
-13
-5
-5
-5
-2
-3
-23
-4
-9
-8
-4
-3
-3
-5
-220
-2
-3
-4
-6
-14
-3
-53
-6
-2
-5
-18
-6
-3
-219
-6
-5
-2
-5
-3
-6
-5
-15
-4
-3
-17
-3
-2
-4
-7
-2
-3
-3
-4
-4
-3
-2
-664
-6
-3
-23
-5
-5
-16
-5
-8
-2
-4
-2
-24
-12
-3
-2
-3
-5
-8
-3
-5
-4
-3
-14
-3
-5
-8
-2
-3
-7
-9
-4
-2
-3
-6
-8
-4
-3
-4
-6
-5
-3
-3
-6
-3
-19
-4
-4
-6
-3
-6
-3
-5
-22
-5
-4
-4
-3
-8
-11
-4
-9
-7
-6
-13
-4
-4
-4
-6
-17
-9
-3
-3
-3
-4
-3
-221
-5
-11
-3
-4
-2
-12
-6
-3
-5
-7
-5
-7
-4
-9
-7
-14
-37
-19
-217
-16
-3
-5
-2
-2
-7
-19
-7
-6
-7
-4
-24
-5
-11
-4
-7
-7
-9
-13
-3
-4
-3
-6
-28
-4
-4
-5
-5
-2
-5
-6
-4
-4
-6
-10
-5
-4
-3
-2
-3
-3
-6
-5
-5
-4
-3
-2
-3
-7
-4
-6
-18
-16
-8
-16
-4
-5
-8
-6
-9
-13
-1545
-6
-215
-6
-5
-6
-3
-45
-31
-5
-2
-2
-4
-3
-3
-2
-5
-4
-3
-5
-7
-7
-4
-5
-8
-5
-4
-749
-2
-31
-9
-11
-2
-11
-5
-4
-4
-7
-9
-11
-4
-5
-4
-7
-3
-4
-6
-2
-15
-3
-4
-3
-4
-3
-5
-2
-13
-5
-5
-3
-3
-23
-4
-4
-5
-7
-4
-13
-2
-4
-3
-4
-2
-6
-2
-7
-3
-5
-5
-3
-29
-5
-4
-4
-3
-10
-2
-3
-79
-16
-6
-6
-7
-7
-3
-5
-5
-7
-4
-3
-7
-9
-5
-6
-5
-9
-6
-3
-6
-4
-17
-2
-10
-9
-3
-6
-2
-3
-21
-22
-5
-11
-4
-2
-17
-2
-224
-2
-14
-3
-4
-4
-2
-4
-4
-4
-4
-5
-3
-4
-4
-10
-2
-6
-3
-3
-5
-7
-2
-7
-5
-6
-3
-218
-2
-2
-5
-2
-6
-3
-5
-222
-14
-6
-33
-3
-2
-5
-3
-3
-3
-9
-5
-3
-3
-2
-7
-4
-3
-4
-3
-5
-6
-5
-26
-4
-13
-9
-7
-3
-221
-3
-3
-4
-4
-4
-4
-2
-18
-5
-3
-7
-9
-6
-8
-3
-10
-3
-11
-9
-5
-4
-17
-5
-5
-6
-6
-3
-2
-4
-12
-17
-6
-7
-218
-4
-2
-4
-10
-3
-5
-15
-3
-9
-4
-3
-3
-6
-29
-3
-3
-4
-5
-5
-3
-8
-5
-6
-6
-7
-5
-3
-5
-3
-29
-2
-31
-5
-15
-24
-16
-5
-207
-4
-3
-3
-2
-15
-4
-4
-13
-5
-5
-4
-6
-10
-2
-7
-8
-4
-6
-20
-5
-3
-4
-3
-12
-12
-5
-17
-7
-3
-3
-3
-6
-10
-3
-5
-25
-80
-4
-9
-3
-2
-11
-3
-3
-2
-3
-8
-7
-5
-5
-19
-5
-3
-3
-12
-11
-2
-6
-5
-5
-5
-3
-3
-3
-4
-209
-14
-3
-2
-5
-19
-4
-4
-3
-4
-14
-5
-6
-4
-13
-9
-7
-4
-7
-10
-2
-9
-5
-7
-2
-8
-4
-6
-5
-5
-222
-8
-7
-12
-5
-216
-3
-4
-4
-6
-3
-14
-8
-7
-13
-4
-3
-3
-3
-3
-17
-5
-4
-3
-33
-6
-6
-33
-7
-5
-3
-8
-7
-5
-2
-9
-4
-2
-233
-24
-7
-4
-8
-10
-3
-4
-15
-2
-16
-3
-3
-13
-12
-7
-5
-4
-207
-4
-2
-4
-27
-15
-2
-5
-2
-25
-6
-5
-5
-6
-13
-6
-18
-6
-4
-12
-225
-10
-7
-5
-2
-2
-11
-4
-14
-21
-8
-10
-3
-5
-4
-232
-2
-5
-5
-3
-7
-17
-11
-6
-6
-23
-4
-6
-3
-5
-4
-2
-17
-3
-6
-5
-8
-3
-2
-2
-14
-9
-4
-4
-2
-5
-5
-3
-7
-6
-12
-6
-10
-3
-6
-2
-2
-19
-5
-4
-4
-9
-2
-4
-13
-3
-5
-6
-3
-6
-5
-4
-9
-6
-3
-5
-7
-3
-6
-6
-4
-3
-10
-6
-3
-221
-3
-5
-3
-6
-4
-8
-5
-3
-6
-4
-4
-2
-54
-5
-6
-11
-3
-3
-4
-4
-4
-3
-7
-3
-11
-11
-7
-10
-6
-13
-223
-213
-15
-231
-7
-3
-7
-228
-2
-3
-4
-4
-5
-6
-7
-4
-13
-3
-4
-5
-3
-6
-4
-6
-7
-2
-4
-3
-4
-3
-3
-6
-3
-7
-3
-5
-18
-5
-6
-8
-10
-3
-3
-3
-2
-4
-2
-4
-4
-5
-6
-6
-4
-10
-13
-3
-12
-5
-12
-16
-8
-4
-19
-11
-2
-4
-5
-6
-8
-5
-6
-4
-18
-10
-4
-2
-216
-6
-6
-6
-2
-4
-12
-8
-3
-11
-5
-6
-14
-5
-3
-13
-4
-5
-4
-5
-3
-28
-6
-3
-7
-219
-3
-9
-7
-3
-10
-6
-3
-4
-19
-5
-7
-11
-6
-15
-19
-4
-13
-11
-3
-7
-5
-10
-2
-8
-11
-2
-6
-4
-6
-24
-6
-3
-3
-3
-3
-6
-18
-4
-11
-4
-2
-5
-10
-8
-3
-9
-5
-3
-4
-5
-6
-2
-5
-7
-4
-4
-14
-6
-4
-4
-5
-5
-7
-2
-4
-3
-7
-3
-3
-6
-4
-5
-4
-4
-4
-3
-3
-3
-3
-8
-14
-2
-3
-5
-3
-2
-4
-5
-3
-7
-3
-3
-18
-3
-4
-4
-5
-7
-3
-3
-3
-13
-5
-4
-8
-211
-5
-5
-3
-5
-2
-5
-4
-2
-655
-6
-3
-5
-11
-2
-5
-3
-12
-9
-15
-11
-5
-12
-217
-2
-6
-17
-3
-3
-207
-5
-5
-4
-5
-9
-3
-2
-8
-5
-4
-3
-2
-5
-12
-4
-14
-5
-4
-2
-13
-5
-8
-4
-225
-4
-3
-4
-5
-4
-3
-3
-6
-23
-9
-2
-6
-7
-233
-4
-4
-6
-18
-3
-4
-6
-3
-4
-4
-2
-3
-7
-4
-13
-227
-4
-3
-5
-4
-2
-12
-9
-17
-3
-7
-14
-6
-4
-5
-21
-4
-8
-9
-2
-9
-25
-16
-3
-6
-4
-7
-8
-5
-2
-3
-5
-4
-3
-3
-5
-3
-3
-3
-2
-3
-19
-2
-4
-3
-4
-2
-3
-4
-4
-2
-4
-3
-3
-3
-2
-6
-3
-17
-5
-6
-4
-3
-13
-5
-3
-3
-3
-4
-9
-4
-2
-14
-12
-4
-5
-24
-4
-3
-37
-12
-11
-21
-3
-4
-3
-13
-4
-2
-3
-15
-4
-11
-4
-4
-3
-8
-3
-4
-4
-12
-8
-5
-3
-3
-4
-2
-220
-3
-5
-223
-3
-3
-3
-10
-3
-15
-4
-241
-9
-7
-3
-6
-6
-23
-4
-13
-7
-3
-4
-7
-4
-9
-3
-3
-4
-10
-5
-5
-1
-5
-24
-2
-4
-5
-5
-6
-14
-3
-8
-2
-3
-5
-13
-13
-3
-5
-2
-3
-15
-3
-4
-2
-10
-4
-4
-4
-5
-5
-3
-5
-3
-4
-7
-4
-27
-3
-6
-4
-15
-3
-5
-6
-6
-5
-4
-8
-3
-9
-2
-6
-3
-4
-3
-7
-4
-18
-3
-11
-3
-3
-8
-9
-7
-24
-3
-219
-7
-10
-4
-5
-9
-12
-2
-5
-4
-4
-4
-3
-3
-19
-5
-8
-16
-8
-6
-22
-3
-23
-3
-242
-9
-4
-3
-3
-5
-7
-3
-3
-5
-8
-3
-7
-5
-14
-8
-10
-3
-4
-3
-7
-4
-6
-7
-4
-10
-4
-3
-11
-3
-7
-10
-3
-13
-6
-8
-12
-10
-5
-7
-9
-3
-4
-7
-7
-10
-8
-30
-9
-19
-4
-3
-19
-15
-4
-13
-3
-215
-223
-4
-7
-4
-8
-17
-16
-3
-7
-6
-5
-5
-4
-12
-3
-7
-4
-4
-13
-4
-5
-2
-5
-6
-5
-6
-6
-7
-10
-18
-23
-9
-3
-3
-6
-5
-2
-4
-2
-7
-3
-3
-2
-5
-5
-14
-10
-224
-6
-3
-4
-3
-7
-5
-9
-3
-6
-4
-2
-5
-11
-4
-3
-3
-2
-8
-4
-7
-4
-10
-7
-3
-3
-18
-18
-17
-3
-3
-3
-4
-5
-3
-3
-4
-12
-7
-3
-11
-13
-5
-4
-7
-13
-5
-4
-11
-3
-12
-3
-6
-4
-4
-21
-4
-6
-9
-5
-3
-10
-8
-4
-6
-4
-4
-6
-5
-4
-8
-6
-4
-6
-4
-4
-5
-9
-6
-3
-4
-2
-9
-3
-18
-2
-4
-3
-13
-3
-6
-6
-8
-7
-9
-3
-2
-16
-3
-4
-6
-3
-2
-33
-22
-14
-4
-9
-12
-4
-5
-6
-3
-23
-9
-4
-3
-5
-5
-3
-4
-5
-3
-5
-3
-10
-4
-5
-5
-8
-4
-4
-6
-8
-5
-4
-3
-4
-6
-3
-3
-3
-5
-9
-12
-6
-5
-9
-3
-5
-3
-2
-2
-2
-18
-3
-2
-21
-2
-5
-4
-6
-4
-5
-10
-3
-9
-3
-2
-10
-7
-3
-6
-6
-4
-4
-8
-12
-7
-3
-7
-3
-3
-9
-3
-4
-5
-4
-4
-5
-5
-10
-15
-4
-4
-14
-6
-227
-3
-14
-5
-216
-22
-5
-4
-2
-2
-6
-3
-4
-2
-9
-9
-4
-3
-28
-13
-11
-4
-5
-3
-3
-2
-3
-3
-5
-3
-4
-3
-5
-23
-26
-3
-4
-5
-6
-4
-6
-3
-5
-5
-3
-4
-3
-2
-2
-2
-7
-14
-3
-6
-7
-17
-2
-2
-15
-14
-16
-4
-6
-7
-13
-6
-4
-5
-6
-16
-3
-3
-28
-3
-6
-15
-3
-9
-2
-4
-6
-3
-3
-22
-4
-12
-6
-7
-2
-5
-4
-10
-3
-16
-6
-9
-2
-5
-12
-7
-5
-5
-5
-5
-2
-11
-9
-17
-4
-3
-11
-7
-3
-5
-15
-4
-3
-4
-211
-8
-7
-5
-4
-7
-6
-7
-6
-3
-6
-5
-6
-5
-3
-4
-4
-26
-4
-6
-10
-4
-4
-3
-2
-3
-3
-4
-5
-9
-3
-9
-4
-4
-5
-5
-8
-2
-4
-2
-3
-8
-4
-11
-19
-5
-8
-6
-3
-5
-6
-12
-3
-2
-4
-16
-12
-3
-4
-4
-8
-6
-5
-6
-6
-219
-8
-222
-6
-16
-3
-13
-19
-5
-4
-3
-11
-6
-10
-4
-7
-7
-12
-5
-3
-3
-5
-6
-10
-3
-8
-2
-5
-4
-7
-2
-4
-4
-2
-12
-9
-6
-4
-2
-40
-2
-4
-10
-4
-223
-4
-2
-20
-6
-7
-24
-5
-4
-5
-2
-20
-16
-6
-5
-13
-2
-3
-3
-19
-3
-2
-4
-5
-6
-7
-11
-12
-5
-6
-7
-7
-3
-5
-3
-5
-3
-14
-3
-4
-4
-2
-11
-1
-7
-3
-9
-6
-11
-12
-5
-8
-6
-221
-4
-2
-12
-4
-3
-15
-4
-5
-226
-7
-218
-7
-5
-4
-5
-18
-4
-5
-9
-4
-4
-2
-9
-18
-18
-9
-5
-6
-6
-3
-3
-7
-3
-5
-4
-4
-4
-12
-3
-6
-31
-5
-4
-7
-3
-6
-5
-6
-5
-11
-2
-2
-11
-11
-6
-7
-5
-8
-7
-10
-5
-23
-7
-4
-3
-5
-34
-2
-5
-23
-7
-3
-6
-8
-4
-4
-4
-2
-5
-3
-8
-5
-4
-8
-25
-2
-3
-17
-8
-3
-4
-8
-7
-3
-15
-6
-5
-7
-21
-9
-5
-6
-6
-5
-3
-2
-3
-10
-3
-6
-3
-14
-7
-4
-4
-8
-7
-8
-2
-6
-12
-4
-213
-6
-5
-21
-8
-2
-5
-23
-3
-11
-2
-3
-6
-25
-2
-3
-6
-7
-6
-6
-4
-4
-6
-3
-17
-9
-7
-6
-4
-3
-10
-7
-2
-3
-3
-3
-11
-8
-3
-7
-6
-4
-14
-36
-3
-4
-3
-3
-22
-13
-21
-4
-2
-7
-4
-4
-17
-15
-3
-7
-11
-2
-4
-7
-6
-209
-6
-3
-2
-2
-24
-4
-9
-4
-3
-3
-3
-29
-2
-2
-4
-3
-3
-5
-4
-6
-3
-3
-2
-4
diff --git a/vendor/src/github.com/beorn7/perks/quantile/stream.go b/vendor/src/github.com/beorn7/perks/quantile/stream.go
deleted file mode 100644
index f4cabd6..0000000
--- a/vendor/src/github.com/beorn7/perks/quantile/stream.go
+++ /dev/null
@@ -1,292 +0,0 @@
-// Package quantile computes approximate quantiles over an unbounded data
-// stream within low memory and CPU bounds.
-//
-// A small amount of accuracy is traded to achieve the above properties.
-//
-// Multiple streams can be merged before calling Query to generate a single set
-// of results. This is meaningful when the streams represent the same type of
-// data. See Merge and Samples.
-//
-// For more detailed information about the algorithm used, see:
-//
-// Effective Computation of Biased Quantiles over Data Streams
-//
-// http://www.cs.rutgers.edu/~muthu/bquant.pdf
-package quantile
-
-import (
- "math"
- "sort"
-)
-
-// Sample holds an observed value and meta information for compression. JSON
-// tags have been added for convenience.
-type Sample struct {
- Value float64 `json:",string"`
- Width float64 `json:",string"`
- Delta float64 `json:",string"`
-}
-
-// Samples represents a slice of samples. It implements sort.Interface.
-type Samples []Sample
-
-func (a Samples) Len() int { return len(a) }
-func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
-func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-
-type invariant func(s *stream, r float64) float64
-
-// NewLowBiased returns an initialized Stream for low-biased quantiles
-// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
-// error guarantees can still be given even for the lower ranks of the data
-// distribution.
-//
-// The provided epsilon is a relative error, i.e. the true quantile of a value
-// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
-//
-// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
-// properties.
-func NewLowBiased(epsilon float64) *Stream {
- Æ’ := func(s *stream, r float64) float64 {
- return 2 * epsilon * r
- }
- return newStream(Æ’)
-}
-
-// NewHighBiased returns an initialized Stream for high-biased quantiles
-// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
-// error guarantees can still be given even for the higher ranks of the data
-// distribution.
-//
-// The provided epsilon is a relative error, i.e. the true quantile of a value
-// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
-//
-// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
-// properties.
-func NewHighBiased(epsilon float64) *Stream {
- Æ’ := func(s *stream, r float64) float64 {
- return 2 * epsilon * (s.n - r)
- }
- return newStream(Æ’)
-}
-
-// NewTargeted returns an initialized Stream concerned with a particular set of
-// quantile values that are supplied a priori. Knowing these a priori reduces
-// space and computation time. The targets map maps the desired quantiles to
-// their absolute errors, i.e. the true quantile of a value returned by a query
-// is guaranteed to be within (Quantile±Epsilon).
-//
-// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
-func NewTargeted(targets map[float64]float64) *Stream {
- Æ’ := func(s *stream, r float64) float64 {
- var m = math.MaxFloat64
- var f float64
- for quantile, epsilon := range targets {
- if quantile*s.n <= r {
- f = (2 * epsilon * r) / quantile
- } else {
- f = (2 * epsilon * (s.n - r)) / (1 - quantile)
- }
- if f < m {
- m = f
- }
- }
- return m
- }
- return newStream(Æ’)
-}
-
-// Stream computes quantiles for a stream of float64s. It is not thread-safe by
-// design. Take care when using across multiple goroutines.
-type Stream struct {
- *stream
- b Samples
- sorted bool
-}
-
-func newStream(Æ’ invariant) *Stream {
- x := &stream{Æ’: Æ’}
- return &Stream{x, make(Samples, 0, 500), true}
-}
-
-// Insert inserts v into the stream.
-func (s *Stream) Insert(v float64) {
- s.insert(Sample{Value: v, Width: 1})
-}
-
-func (s *Stream) insert(sample Sample) {
- s.b = append(s.b, sample)
- s.sorted = false
- if len(s.b) == cap(s.b) {
- s.flush()
- }
-}
-
-// Query returns the computed qth percentiles value. If s was created with
-// NewTargeted, and q is not in the set of quantiles provided a priori, Query
-// will return an unspecified result.
-func (s *Stream) Query(q float64) float64 {
- if !s.flushed() {
- // Fast path when there hasn't been enough data for a flush;
- // this also yields better accuracy for small sets of data.
- l := len(s.b)
- if l == 0 {
- return 0
- }
- i := int(math.Ceil(float64(l) * q))
- if i > 0 {
- i -= 1
- }
- s.maybeSort()
- return s.b[i].Value
- }
- s.flush()
- return s.stream.query(q)
-}
-
-// Merge merges samples into the underlying streams samples. This is handy when
-// merging multiple streams from separate threads, database shards, etc.
-//
-// ATTENTION: This method is broken and does not yield correct results. The
-// underlying algorithm is not capable of merging streams correctly.
-func (s *Stream) Merge(samples Samples) {
- sort.Sort(samples)
- s.stream.merge(samples)
-}
-
-// Reset reinitializes and clears the list reusing the samples buffer memory.
-func (s *Stream) Reset() {
- s.stream.reset()
- s.b = s.b[:0]
-}
-
-// Samples returns stream samples held by s.
-func (s *Stream) Samples() Samples {
- if !s.flushed() {
- return s.b
- }
- s.flush()
- return s.stream.samples()
-}
-
-// Count returns the total number of samples observed in the stream
-// since initialization.
-func (s *Stream) Count() int {
- return len(s.b) + s.stream.count()
-}
-
-func (s *Stream) flush() {
- s.maybeSort()
- s.stream.merge(s.b)
- s.b = s.b[:0]
-}
-
-func (s *Stream) maybeSort() {
- if !s.sorted {
- s.sorted = true
- sort.Sort(s.b)
- }
-}
-
-func (s *Stream) flushed() bool {
- return len(s.stream.l) > 0
-}
-
-type stream struct {
- n float64
- l []Sample
- Æ’ invariant
-}
-
-func (s *stream) reset() {
- s.l = s.l[:0]
- s.n = 0
-}
-
-func (s *stream) insert(v float64) {
- s.merge(Samples{{v, 1, 0}})
-}
-
-func (s *stream) merge(samples Samples) {
- // TODO(beorn7): This tries to merge not only individual samples, but
- // whole summaries. The paper doesn't mention merging summaries at
- // all. Unittests show that the merging is inaccurate. Find out how to
- // do merges properly.
- var r float64
- i := 0
- for _, sample := range samples {
- for ; i < len(s.l); i++ {
- c := s.l[i]
- if c.Value > sample.Value {
- // Insert at position i.
- s.l = append(s.l, Sample{})
- copy(s.l[i+1:], s.l[i:])
- s.l[i] = Sample{
- sample.Value,
- sample.Width,
- math.Max(sample.Delta, math.Floor(s.Æ’(s, r))-1),
- // TODO(beorn7): How to calculate delta correctly?
- }
- i++
- goto inserted
- }
- r += c.Width
- }
- s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
- i++
- inserted:
- s.n += sample.Width
- r += sample.Width
- }
- s.compress()
-}
-
-func (s *stream) count() int {
- return int(s.n)
-}
-
-func (s *stream) query(q float64) float64 {
- t := math.Ceil(q * s.n)
- t += math.Ceil(s.Æ’(s, t) / 2)
- p := s.l[0]
- var r float64
- for _, c := range s.l[1:] {
- r += p.Width
- if r+c.Width+c.Delta > t {
- return p.Value
- }
- p = c
- }
- return p.Value
-}
-
-func (s *stream) compress() {
- if len(s.l) < 2 {
- return
- }
- x := s.l[len(s.l)-1]
- xi := len(s.l) - 1
- r := s.n - 1 - x.Width
-
- for i := len(s.l) - 2; i >= 0; i-- {
- c := s.l[i]
- if c.Width+x.Width+x.Delta <= s.Æ’(s, r) {
- x.Width += c.Width
- s.l[xi] = x
- // Remove element at i.
- copy(s.l[i:], s.l[i+1:])
- s.l = s.l[:len(s.l)-1]
- xi -= 1
- } else {
- x = c
- xi = i
- }
- r -= c.Width
- }
-}
-
-func (s *stream) samples() Samples {
- samples := make(Samples, len(s.l))
- copy(samples, s.l)
- return samples
-}
diff --git a/vendor/src/github.com/beorn7/perks/quantile/stream_test.go b/vendor/src/github.com/beorn7/perks/quantile/stream_test.go
deleted file mode 100644
index 8551950..0000000
--- a/vendor/src/github.com/beorn7/perks/quantile/stream_test.go
+++ /dev/null
@@ -1,215 +0,0 @@
-package quantile
-
-import (
- "math"
- "math/rand"
- "sort"
- "testing"
-)
-
-var (
- Targets = map[float64]float64{
- 0.01: 0.001,
- 0.10: 0.01,
- 0.50: 0.05,
- 0.90: 0.01,
- 0.99: 0.001,
- }
- TargetsSmallEpsilon = map[float64]float64{
- 0.01: 0.0001,
- 0.10: 0.001,
- 0.50: 0.005,
- 0.90: 0.001,
- 0.99: 0.0001,
- }
- LowQuantiles = []float64{0.01, 0.1, 0.5}
- HighQuantiles = []float64{0.99, 0.9, 0.5}
-)
-
-const RelativeEpsilon = 0.01
-
-func verifyPercsWithAbsoluteEpsilon(t *testing.T, a []float64, s *Stream) {
- sort.Float64s(a)
- for quantile, epsilon := range Targets {
- n := float64(len(a))
- k := int(quantile * n)
- if k < 1 {
- k = 1
- }
- lower := int((quantile - epsilon) * n)
- if lower < 1 {
- lower = 1
- }
- upper := int(math.Ceil((quantile + epsilon) * n))
- if upper > len(a) {
- upper = len(a)
- }
- w, min, max := a[k-1], a[lower-1], a[upper-1]
- if g := s.Query(quantile); g < min || g > max {
- t.Errorf("q=%f: want %v [%f,%f], got %v", quantile, w, min, max, g)
- }
- }
-}
-
-func verifyLowPercsWithRelativeEpsilon(t *testing.T, a []float64, s *Stream) {
- sort.Float64s(a)
- for _, qu := range LowQuantiles {
- n := float64(len(a))
- k := int(qu * n)
-
- lowerRank := int((1 - RelativeEpsilon) * qu * n)
- upperRank := int(math.Ceil((1 + RelativeEpsilon) * qu * n))
- w, min, max := a[k-1], a[lowerRank-1], a[upperRank-1]
- if g := s.Query(qu); g < min || g > max {
- t.Errorf("q=%f: want %v [%f,%f], got %v", qu, w, min, max, g)
- }
- }
-}
-
-func verifyHighPercsWithRelativeEpsilon(t *testing.T, a []float64, s *Stream) {
- sort.Float64s(a)
- for _, qu := range HighQuantiles {
- n := float64(len(a))
- k := int(qu * n)
-
- lowerRank := int((1 - (1+RelativeEpsilon)*(1-qu)) * n)
- upperRank := int(math.Ceil((1 - (1-RelativeEpsilon)*(1-qu)) * n))
- w, min, max := a[k-1], a[lowerRank-1], a[upperRank-1]
- if g := s.Query(qu); g < min || g > max {
- t.Errorf("q=%f: want %v [%f,%f], got %v", qu, w, min, max, g)
- }
- }
-}
-
-func populateStream(s *Stream) []float64 {
- a := make([]float64, 0, 1e5+100)
- for i := 0; i < cap(a); i++ {
- v := rand.NormFloat64()
- // Add 5% asymmetric outliers.
- if i%20 == 0 {
- v = v*v + 1
- }
- s.Insert(v)
- a = append(a, v)
- }
- return a
-}
-
-func TestTargetedQuery(t *testing.T) {
- rand.Seed(42)
- s := NewTargeted(Targets)
- a := populateStream(s)
- verifyPercsWithAbsoluteEpsilon(t, a, s)
-}
-
-func TestTargetedQuerySmallSampleSize(t *testing.T) {
- rand.Seed(42)
- s := NewTargeted(TargetsSmallEpsilon)
- a := []float64{1, 2, 3, 4, 5}
- for _, v := range a {
- s.Insert(v)
- }
- verifyPercsWithAbsoluteEpsilon(t, a, s)
- // If not yet flushed, results should be precise:
- if !s.flushed() {
- for φ, want := range map[float64]float64{
- 0.01: 1,
- 0.10: 1,
- 0.50: 3,
- 0.90: 5,
- 0.99: 5,
- } {
- if got := s.Query(φ); got != want {
- t.Errorf("want %f for φ=%f, got %f", want, φ, got)
- }
- }
- }
-}
-
-func TestLowBiasedQuery(t *testing.T) {
- rand.Seed(42)
- s := NewLowBiased(RelativeEpsilon)
- a := populateStream(s)
- verifyLowPercsWithRelativeEpsilon(t, a, s)
-}
-
-func TestHighBiasedQuery(t *testing.T) {
- rand.Seed(42)
- s := NewHighBiased(RelativeEpsilon)
- a := populateStream(s)
- verifyHighPercsWithRelativeEpsilon(t, a, s)
-}
-
-// BrokenTestTargetedMerge is broken, see Merge doc comment.
-func BrokenTestTargetedMerge(t *testing.T) {
- rand.Seed(42)
- s1 := NewTargeted(Targets)
- s2 := NewTargeted(Targets)
- a := populateStream(s1)
- a = append(a, populateStream(s2)...)
- s1.Merge(s2.Samples())
- verifyPercsWithAbsoluteEpsilon(t, a, s1)
-}
-
-// BrokenTestLowBiasedMerge is broken, see Merge doc comment.
-func BrokenTestLowBiasedMerge(t *testing.T) {
- rand.Seed(42)
- s1 := NewLowBiased(RelativeEpsilon)
- s2 := NewLowBiased(RelativeEpsilon)
- a := populateStream(s1)
- a = append(a, populateStream(s2)...)
- s1.Merge(s2.Samples())
- verifyLowPercsWithRelativeEpsilon(t, a, s2)
-}
-
-// BrokenTestHighBiasedMerge is broken, see Merge doc comment.
-func BrokenTestHighBiasedMerge(t *testing.T) {
- rand.Seed(42)
- s1 := NewHighBiased(RelativeEpsilon)
- s2 := NewHighBiased(RelativeEpsilon)
- a := populateStream(s1)
- a = append(a, populateStream(s2)...)
- s1.Merge(s2.Samples())
- verifyHighPercsWithRelativeEpsilon(t, a, s2)
-}
-
-func TestUncompressed(t *testing.T) {
- q := NewTargeted(Targets)
- for i := 100; i > 0; i-- {
- q.Insert(float64(i))
- }
- if g := q.Count(); g != 100 {
- t.Errorf("want count 100, got %d", g)
- }
- // Before compression, Query should have 100% accuracy.
- for quantile := range Targets {
- w := quantile * 100
- if g := q.Query(quantile); g != w {
- t.Errorf("want %f, got %f", w, g)
- }
- }
-}
-
-func TestUncompressedSamples(t *testing.T) {
- q := NewTargeted(map[float64]float64{0.99: 0.001})
- for i := 1; i <= 100; i++ {
- q.Insert(float64(i))
- }
- if g := q.Samples().Len(); g != 100 {
- t.Errorf("want count 100, got %d", g)
- }
-}
-
-func TestUncompressedOne(t *testing.T) {
- q := NewTargeted(map[float64]float64{0.99: 0.01})
- q.Insert(3.14)
- if g := q.Query(0.90); g != 3.14 {
- t.Error("want PI, got", g)
- }
-}
-
-func TestDefaults(t *testing.T) {
- if g := NewTargeted(map[float64]float64{0.99: 0.001}).Query(0.99); g != 0 {
- t.Errorf("want 0, got %f", g)
- }
-}
diff --git a/vendor/src/github.com/bradfitz/gomemcache/memcache/memcache.go b/vendor/src/github.com/bradfitz/gomemcache/memcache/memcache.go
deleted file mode 100644
index 7b5442d..0000000
--- a/vendor/src/github.com/bradfitz/gomemcache/memcache/memcache.go
+++ /dev/null
@@ -1,666 +0,0 @@
-/*
-Copyright 2011 Google Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Package memcache provides a client for the memcached cache server.
-package memcache
-
-import (
- "bufio"
- "bytes"
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "net"
-
- "strconv"
- "strings"
- "sync"
- "time"
-)
-
-// Similar to:
-// http://code.google.com/appengine/docs/go/memcache/reference.html
-
-var (
- // ErrCacheMiss means that a Get failed because the item wasn't present.
- ErrCacheMiss = errors.New("memcache: cache miss")
-
- // ErrCASConflict means that a CompareAndSwap call failed due to the
- // cached value being modified between the Get and the CompareAndSwap.
- // If the cached value was simply evicted rather than replaced,
- // ErrNotStored will be returned instead.
- ErrCASConflict = errors.New("memcache: compare-and-swap conflict")
-
- // ErrNotStored means that a conditional write operation (i.e. Add or
- // CompareAndSwap) failed because the condition was not satisfied.
- ErrNotStored = errors.New("memcache: item not stored")
-
- // ErrServer means that a server error occurred.
- ErrServerError = errors.New("memcache: server error")
-
- // ErrNoStats means that no statistics were available.
- ErrNoStats = errors.New("memcache: no statistics available")
-
- // ErrMalformedKey is returned when an invalid key is used.
- // Keys must be at maximum 250 bytes long, ASCII, and not
- // contain whitespace or control characters.
- ErrMalformedKey = errors.New("malformed: key is too long or contains invalid characters")
-
- // ErrNoServers is returned when no servers are configured or available.
- ErrNoServers = errors.New("memcache: no servers configured or available")
-)
-
-// DefaultTimeout is the default socket read/write timeout.
-const DefaultTimeout = 100 * time.Millisecond
-
-const (
- buffered = 8 // arbitrary buffered channel size, for readability
- maxIdleConnsPerAddr = 2 // TODO(bradfitz): make this configurable?
-)
-
-// resumableError returns true if err is only a protocol-level cache error.
-// This is used to determine whether or not a server connection should
-// be re-used or not. If an error occurs, by default we don't reuse the
-// connection, unless it was just a cache error.
-func resumableError(err error) bool {
- switch err {
- case ErrCacheMiss, ErrCASConflict, ErrNotStored, ErrMalformedKey:
- return true
- }
- return false
-}
-
-func legalKey(key string) bool {
- if len(key) > 250 {
- return false
- }
- for i := 0; i < len(key); i++ {
- if key[i] <= ' ' || key[i] > 0x7e {
- return false
- }
- }
- return true
-}
-
-var (
- crlf = []byte("\r\n")
- space = []byte(" ")
- resultOK = []byte("OK\r\n")
- resultStored = []byte("STORED\r\n")
- resultNotStored = []byte("NOT_STORED\r\n")
- resultExists = []byte("EXISTS\r\n")
- resultNotFound = []byte("NOT_FOUND\r\n")
- resultDeleted = []byte("DELETED\r\n")
- resultEnd = []byte("END\r\n")
- resultOk = []byte("OK\r\n")
- resultTouched = []byte("TOUCHED\r\n")
-
- resultClientErrorPrefix = []byte("CLIENT_ERROR ")
-)
-
-// New returns a memcache client using the provided server(s)
-// with equal weight. If a server is listed multiple times,
-// it gets a proportional amount of weight.
-func New(server ...string) *Client {
- ss := new(ServerList)
- ss.SetServers(server...)
- return NewFromSelector(ss)
-}
-
-// NewFromSelector returns a new Client using the provided ServerSelector.
-func NewFromSelector(ss ServerSelector) *Client {
- return &Client{selector: ss}
-}
-
-// Client is a memcache client.
-// It is safe for unlocked use by multiple concurrent goroutines.
-type Client struct {
- // Timeout specifies the socket read/write timeout.
- // If zero, DefaultTimeout is used.
- Timeout time.Duration
-
- selector ServerSelector
-
- lk sync.Mutex
- freeconn map[string][]*conn
-}
-
-// Item is an item to be got or stored in a memcached server.
-type Item struct {
- // Key is the Item's key (250 bytes maximum).
- Key string
-
- // Value is the Item's value.
- Value []byte
-
- // Flags are server-opaque flags whose semantics are entirely
- // up to the app.
- Flags uint32
-
- // Expiration is the cache expiration time, in seconds: either a relative
- // time from now (up to 1 month), or an absolute Unix epoch time.
- // Zero means the Item has no expiration time.
- Expiration int32
-
- // Compare and swap ID.
- casid uint64
-}
-
-// conn is a connection to a server.
-type conn struct {
- nc net.Conn
- rw *bufio.ReadWriter
- addr net.Addr
- c *Client
-}
-
-// release returns this connection back to the client's free pool
-func (cn *conn) release() {
- cn.c.putFreeConn(cn.addr, cn)
-}
-
-func (cn *conn) extendDeadline() {
- cn.nc.SetDeadline(time.Now().Add(cn.c.netTimeout()))
-}
-
-// condRelease releases this connection if the error pointed to by err
-// is nil (not an error) or is only a protocol level error (e.g. a
-// cache miss). The purpose is to not recycle TCP connections that
-// are bad.
-func (cn *conn) condRelease(err *error) {
- if *err == nil || resumableError(*err) {
- cn.release()
- } else {
- cn.nc.Close()
- }
-}
-
-func (c *Client) putFreeConn(addr net.Addr, cn *conn) {
- c.lk.Lock()
- defer c.lk.Unlock()
- if c.freeconn == nil {
- c.freeconn = make(map[string][]*conn)
- }
- freelist := c.freeconn[addr.String()]
- if len(freelist) >= maxIdleConnsPerAddr {
- cn.nc.Close()
- return
- }
- c.freeconn[addr.String()] = append(freelist, cn)
-}
-
-func (c *Client) getFreeConn(addr net.Addr) (cn *conn, ok bool) {
- c.lk.Lock()
- defer c.lk.Unlock()
- if c.freeconn == nil {
- return nil, false
- }
- freelist, ok := c.freeconn[addr.String()]
- if !ok || len(freelist) == 0 {
- return nil, false
- }
- cn = freelist[len(freelist)-1]
- c.freeconn[addr.String()] = freelist[:len(freelist)-1]
- return cn, true
-}
-
-func (c *Client) netTimeout() time.Duration {
- if c.Timeout != 0 {
- return c.Timeout
- }
- return DefaultTimeout
-}
-
-// ConnectTimeoutError is the error type used when it takes
-// too long to connect to the desired host. This level of
-// detail can generally be ignored.
-type ConnectTimeoutError struct {
- Addr net.Addr
-}
-
-func (cte *ConnectTimeoutError) Error() string {
- return "memcache: connect timeout to " + cte.Addr.String()
-}
-
-func (c *Client) dial(addr net.Addr) (net.Conn, error) {
- type connError struct {
- cn net.Conn
- err error
- }
-
- nc, err := net.DialTimeout(addr.Network(), addr.String(), c.netTimeout())
- if err == nil {
- return nc, nil
- }
-
- if ne, ok := err.(net.Error); ok && ne.Timeout() {
- return nil, &ConnectTimeoutError{addr}
- }
-
- return nil, err
-}
-
-func (c *Client) getConn(addr net.Addr) (*conn, error) {
- cn, ok := c.getFreeConn(addr)
- if ok {
- cn.extendDeadline()
- return cn, nil
- }
- nc, err := c.dial(addr)
- if err != nil {
- return nil, err
- }
- cn = &conn{
- nc: nc,
- addr: addr,
- rw: bufio.NewReadWriter(bufio.NewReader(nc), bufio.NewWriter(nc)),
- c: c,
- }
- cn.extendDeadline()
- return cn, nil
-}
-
-func (c *Client) onItem(item *Item, fn func(*Client, *bufio.ReadWriter, *Item) error) error {
- addr, err := c.selector.PickServer(item.Key)
- if err != nil {
- return err
- }
- cn, err := c.getConn(addr)
- if err != nil {
- return err
- }
- defer cn.condRelease(&err)
- if err = fn(c, cn.rw, item); err != nil {
- return err
- }
- return nil
-}
-
-func (c *Client) FlushAll() error {
- return c.selector.Each(c.flushAllFromAddr)
-}
-
-// Get gets the item for the given key. ErrCacheMiss is returned for a
-// memcache cache miss. The key must be at most 250 bytes in length.
-func (c *Client) Get(key string) (item *Item, err error) {
- err = c.withKeyAddr(key, func(addr net.Addr) error {
- return c.getFromAddr(addr, []string{key}, func(it *Item) { item = it })
- })
- if err == nil && item == nil {
- err = ErrCacheMiss
- }
- return
-}
-
-// Touch updates the expiry for the given key. The seconds parameter is either
-// a Unix timestamp or, if seconds is less than 1 month, the number of seconds
-// into the future at which time the item will expire. ErrCacheMiss is returned if the
-// key is not in the cache. The key must be at most 250 bytes in length.
-func (c *Client) Touch(key string, seconds int32) (err error) {
- return c.withKeyAddr(key, func(addr net.Addr) error {
- return c.touchFromAddr(addr, []string{key}, seconds)
- })
-}
-
-func (c *Client) withKeyAddr(key string, fn func(net.Addr) error) (err error) {
- if !legalKey(key) {
- return ErrMalformedKey
- }
- addr, err := c.selector.PickServer(key)
- if err != nil {
- return err
- }
- return fn(addr)
-}
-
-func (c *Client) withAddrRw(addr net.Addr, fn func(*bufio.ReadWriter) error) (err error) {
- cn, err := c.getConn(addr)
- if err != nil {
- return err
- }
- defer cn.condRelease(&err)
- return fn(cn.rw)
-}
-
-func (c *Client) withKeyRw(key string, fn func(*bufio.ReadWriter) error) error {
- return c.withKeyAddr(key, func(addr net.Addr) error {
- return c.withAddrRw(addr, fn)
- })
-}
-
-func (c *Client) getFromAddr(addr net.Addr, keys []string, cb func(*Item)) error {
- return c.withAddrRw(addr, func(rw *bufio.ReadWriter) error {
- if _, err := fmt.Fprintf(rw, "gets %s\r\n", strings.Join(keys, " ")); err != nil {
- return err
- }
- if err := rw.Flush(); err != nil {
- return err
- }
- if err := parseGetResponse(rw.Reader, cb); err != nil {
- return err
- }
- return nil
- })
-}
-
-// flushAllFromAddr send the flush_all command to the given addr
-func (c *Client) flushAllFromAddr(addr net.Addr) error {
- return c.withAddrRw(addr, func(rw *bufio.ReadWriter) error {
- if _, err := fmt.Fprintf(rw, "flush_all\r\n"); err != nil {
- return err
- }
- if err := rw.Flush(); err != nil {
- return err
- }
- line, err := rw.ReadSlice('\n')
- if err != nil {
- return err
- }
- switch {
- case bytes.Equal(line, resultOk):
- break
- default:
- return fmt.Errorf("memcache: unexpected response line from flush_all: %q", string(line))
- }
- return nil
- })
-}
-
-func (c *Client) touchFromAddr(addr net.Addr, keys []string, expiration int32) error {
- return c.withAddrRw(addr, func(rw *bufio.ReadWriter) error {
- for _, key := range keys {
- if _, err := fmt.Fprintf(rw, "touch %s %d\r\n", key, expiration); err != nil {
- return err
- }
- if err := rw.Flush(); err != nil {
- return err
- }
- line, err := rw.ReadSlice('\n')
- if err != nil {
- return err
- }
- switch {
- case bytes.Equal(line, resultTouched):
- break
- case bytes.Equal(line, resultNotFound):
- return ErrCacheMiss
- default:
- return fmt.Errorf("memcache: unexpected response line from touch: %q", string(line))
- }
- }
- return nil
- })
-}
-
-// GetMulti is a batch version of Get. The returned map from keys to
-// items may have fewer elements than the input slice, due to memcache
-// cache misses. Each key must be at most 250 bytes in length.
-// If no error is returned, the returned map will also be non-nil.
-func (c *Client) GetMulti(keys []string) (map[string]*Item, error) {
- var lk sync.Mutex
- m := make(map[string]*Item)
- addItemToMap := func(it *Item) {
- lk.Lock()
- defer lk.Unlock()
- m[it.Key] = it
- }
-
- keyMap := make(map[net.Addr][]string)
- for _, key := range keys {
- if !legalKey(key) {
- return nil, ErrMalformedKey
- }
- addr, err := c.selector.PickServer(key)
- if err != nil {
- return nil, err
- }
- keyMap[addr] = append(keyMap[addr], key)
- }
-
- ch := make(chan error, buffered)
- for addr, keys := range keyMap {
- go func(addr net.Addr, keys []string) {
- ch <- c.getFromAddr(addr, keys, addItemToMap)
- }(addr, keys)
- }
-
- var err error
- for _ = range keyMap {
- if ge := <-ch; ge != nil {
- err = ge
- }
- }
- return m, err
-}
-
-// parseGetResponse reads a GET response from r and calls cb for each
-// read and allocated Item
-func parseGetResponse(r *bufio.Reader, cb func(*Item)) error {
- for {
- line, err := r.ReadSlice('\n')
- if err != nil {
- return err
- }
- if bytes.Equal(line, resultEnd) {
- return nil
- }
- it := new(Item)
- size, err := scanGetResponseLine(line, it)
- if err != nil {
- return err
- }
- it.Value, err = ioutil.ReadAll(io.LimitReader(r, int64(size)+2))
- if err != nil {
- return err
- }
- if !bytes.HasSuffix(it.Value, crlf) {
- return fmt.Errorf("memcache: corrupt get result read")
- }
- it.Value = it.Value[:size]
- cb(it)
- }
-}
-
-// scanGetResponseLine populates it and returns the declared size of the item.
-// It does not read the bytes of the item.
-func scanGetResponseLine(line []byte, it *Item) (size int, err error) {
- pattern := "VALUE %s %d %d %d\r\n"
- dest := []interface{}{&it.Key, &it.Flags, &size, &it.casid}
- if bytes.Count(line, space) == 3 {
- pattern = "VALUE %s %d %d\r\n"
- dest = dest[:3]
- }
- n, err := fmt.Sscanf(string(line), pattern, dest...)
- if err != nil || n != len(dest) {
- return -1, fmt.Errorf("memcache: unexpected line in get response: %q", line)
- }
- return size, nil
-}
-
-// Set writes the given item, unconditionally.
-func (c *Client) Set(item *Item) error {
- return c.onItem(item, (*Client).set)
-}
-
-func (c *Client) set(rw *bufio.ReadWriter, item *Item) error {
- return c.populateOne(rw, "set", item)
-}
-
-// Add writes the given item, if no value already exists for its
-// key. ErrNotStored is returned if that condition is not met.
-func (c *Client) Add(item *Item) error {
- return c.onItem(item, (*Client).add)
-}
-
-func (c *Client) add(rw *bufio.ReadWriter, item *Item) error {
- return c.populateOne(rw, "add", item)
-}
-
-// Replace writes the given item, but only if the server *does*
-// already hold data for this key
-func (c *Client) Replace(item *Item) error {
- return c.onItem(item, (*Client).replace)
-}
-
-func (c *Client) replace(rw *bufio.ReadWriter, item *Item) error {
- return c.populateOne(rw, "replace", item)
-}
-
-// CompareAndSwap writes the given item that was previously returned
-// by Get, if the value was neither modified or evicted between the
-// Get and the CompareAndSwap calls. The item's Key should not change
-// between calls but all other item fields may differ. ErrCASConflict
-// is returned if the value was modified in between the
-// calls. ErrNotStored is returned if the value was evicted in between
-// the calls.
-func (c *Client) CompareAndSwap(item *Item) error {
- return c.onItem(item, (*Client).cas)
-}
-
-func (c *Client) cas(rw *bufio.ReadWriter, item *Item) error {
- return c.populateOne(rw, "cas", item)
-}
-
-func (c *Client) populateOne(rw *bufio.ReadWriter, verb string, item *Item) error {
- if !legalKey(item.Key) {
- return ErrMalformedKey
- }
- var err error
- if verb == "cas" {
- _, err = fmt.Fprintf(rw, "%s %s %d %d %d %d\r\n",
- verb, item.Key, item.Flags, item.Expiration, len(item.Value), item.casid)
- } else {
- _, err = fmt.Fprintf(rw, "%s %s %d %d %d\r\n",
- verb, item.Key, item.Flags, item.Expiration, len(item.Value))
- }
- if err != nil {
- return err
- }
- if _, err = rw.Write(item.Value); err != nil {
- return err
- }
- if _, err := rw.Write(crlf); err != nil {
- return err
- }
- if err := rw.Flush(); err != nil {
- return err
- }
- line, err := rw.ReadSlice('\n')
- if err != nil {
- return err
- }
- switch {
- case bytes.Equal(line, resultStored):
- return nil
- case bytes.Equal(line, resultNotStored):
- return ErrNotStored
- case bytes.Equal(line, resultExists):
- return ErrCASConflict
- case bytes.Equal(line, resultNotFound):
- return ErrCacheMiss
- }
- return fmt.Errorf("memcache: unexpected response line from %q: %q", verb, string(line))
-}
-
-func writeReadLine(rw *bufio.ReadWriter, format string, args ...interface{}) ([]byte, error) {
- _, err := fmt.Fprintf(rw, format, args...)
- if err != nil {
- return nil, err
- }
- if err := rw.Flush(); err != nil {
- return nil, err
- }
- line, err := rw.ReadSlice('\n')
- return line, err
-}
-
-func writeExpectf(rw *bufio.ReadWriter, expect []byte, format string, args ...interface{}) error {
- line, err := writeReadLine(rw, format, args...)
- if err != nil {
- return err
- }
- switch {
- case bytes.Equal(line, resultOK):
- return nil
- case bytes.Equal(line, expect):
- return nil
- case bytes.Equal(line, resultNotStored):
- return ErrNotStored
- case bytes.Equal(line, resultExists):
- return ErrCASConflict
- case bytes.Equal(line, resultNotFound):
- return ErrCacheMiss
- }
- return fmt.Errorf("memcache: unexpected response line: %q", string(line))
-}
-
-// Delete deletes the item with the provided key. The error ErrCacheMiss is
-// returned if the item didn't already exist in the cache.
-func (c *Client) Delete(key string) error {
- return c.withKeyRw(key, func(rw *bufio.ReadWriter) error {
- return writeExpectf(rw, resultDeleted, "delete %s\r\n", key)
- })
-}
-
-// DeleteAll deletes all items in the cache.
-func (c *Client) DeleteAll() error {
- return c.withKeyRw("", func(rw *bufio.ReadWriter) error {
- return writeExpectf(rw, resultDeleted, "flush_all\r\n")
- })
-}
-
-// Increment atomically increments key by delta. The return value is
-// the new value after being incremented or an error. If the value
-// didn't exist in memcached the error is ErrCacheMiss. The value in
-// memcached must be an decimal number, or an error will be returned.
-// On 64-bit overflow, the new value wraps around.
-func (c *Client) Increment(key string, delta uint64) (newValue uint64, err error) {
- return c.incrDecr("incr", key, delta)
-}
-
-// Decrement atomically decrements key by delta. The return value is
-// the new value after being decremented or an error. If the value
-// didn't exist in memcached the error is ErrCacheMiss. The value in
-// memcached must be an decimal number, or an error will be returned.
-// On underflow, the new value is capped at zero and does not wrap
-// around.
-func (c *Client) Decrement(key string, delta uint64) (newValue uint64, err error) {
- return c.incrDecr("decr", key, delta)
-}
-
-func (c *Client) incrDecr(verb, key string, delta uint64) (uint64, error) {
- var val uint64
- err := c.withKeyRw(key, func(rw *bufio.ReadWriter) error {
- line, err := writeReadLine(rw, "%s %s %d\r\n", verb, key, delta)
- if err != nil {
- return err
- }
- switch {
- case bytes.Equal(line, resultNotFound):
- return ErrCacheMiss
- case bytes.HasPrefix(line, resultClientErrorPrefix):
- errMsg := line[len(resultClientErrorPrefix) : len(line)-2]
- return errors.New("memcache: client error: " + string(errMsg))
- }
- val, err = strconv.ParseUint(string(line[:len(line)-2]), 10, 64)
- if err != nil {
- return err
- }
- return nil
- })
- return val, err
-}
diff --git a/vendor/src/github.com/bradfitz/gomemcache/memcache/memcache_test.go b/vendor/src/github.com/bradfitz/gomemcache/memcache/memcache_test.go
deleted file mode 100644
index 619ce6d..0000000
--- a/vendor/src/github.com/bradfitz/gomemcache/memcache/memcache_test.go
+++ /dev/null
@@ -1,230 +0,0 @@
-/*
-Copyright 2011 Google Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Package memcache provides a client for the memcached cache server.
-package memcache
-
-import (
- "fmt"
- "net"
- "os"
- "os/exec"
- "strings"
- "testing"
- "time"
-)
-
-const testServer = "localhost:11211"
-
-func setup(t *testing.T) bool {
- c, err := net.Dial("tcp", testServer)
- if err != nil {
- t.Skipf("skipping test; no server running at %s", testServer)
- }
- c.Write([]byte("flush_all\r\n"))
- c.Close()
- return true
-}
-
-func TestLocalhost(t *testing.T) {
- if !setup(t) {
- return
- }
- testWithClient(t, New(testServer))
-}
-
-// Run the memcached binary as a child process and connect to its unix socket.
-func TestUnixSocket(t *testing.T) {
- sock := fmt.Sprintf("/tmp/test-gomemcache-%d.sock", os.Getpid())
- cmd := exec.Command("memcached", "-s", sock)
- if err := cmd.Start(); err != nil {
- t.Skipf("skipping test; couldn't find memcached")
- return
- }
- defer cmd.Wait()
- defer cmd.Process.Kill()
-
- // Wait a bit for the socket to appear.
- for i := 0; i < 10; i++ {
- if _, err := os.Stat(sock); err == nil {
- break
- }
- time.Sleep(time.Duration(25*i) * time.Millisecond)
- }
-
- testWithClient(t, New(sock))
-}
-
-func mustSetF(t *testing.T, c *Client) func(*Item) {
- return func(it *Item) {
- if err := c.Set(it); err != nil {
- t.Fatalf("failed to Set %#v: %v", *it, err)
- }
- }
-}
-
-func testWithClient(t *testing.T, c *Client) {
- checkErr := func(err error, format string, args ...interface{}) {
- if err != nil {
- t.Fatalf(format, args...)
- }
- }
- mustSet := mustSetF(t, c)
-
- // Set
- foo := &Item{Key: "foo", Value: []byte("fooval"), Flags: 123}
- err := c.Set(foo)
- checkErr(err, "first set(foo): %v", err)
- err = c.Set(foo)
- checkErr(err, "second set(foo): %v", err)
-
- // Get
- it, err := c.Get("foo")
- checkErr(err, "get(foo): %v", err)
- if it.Key != "foo" {
- t.Errorf("get(foo) Key = %q, want foo", it.Key)
- }
- if string(it.Value) != "fooval" {
- t.Errorf("get(foo) Value = %q, want fooval", string(it.Value))
- }
- if it.Flags != 123 {
- t.Errorf("get(foo) Flags = %v, want 123", it.Flags)
- }
-
- // Add
- bar := &Item{Key: "bar", Value: []byte("barval")}
- err = c.Add(bar)
- checkErr(err, "first add(foo): %v", err)
- if err := c.Add(bar); err != ErrNotStored {
- t.Fatalf("second add(foo) want ErrNotStored, got %v", err)
- }
-
- // Replace
- baz := &Item{Key: "baz", Value: []byte("bazvalue")}
- if err := c.Replace(baz); err != ErrNotStored {
- t.Fatalf("expected replace(baz) to return ErrNotStored, got %v", err)
- }
- err = c.Replace(bar)
- checkErr(err, "replaced(foo): %v", err)
-
- // GetMulti
- m, err := c.GetMulti([]string{"foo", "bar"})
- checkErr(err, "GetMulti: %v", err)
- if g, e := len(m), 2; g != e {
- t.Errorf("GetMulti: got len(map) = %d, want = %d", g, e)
- }
- if _, ok := m["foo"]; !ok {
- t.Fatalf("GetMulti: didn't get key 'foo'")
- }
- if _, ok := m["bar"]; !ok {
- t.Fatalf("GetMulti: didn't get key 'bar'")
- }
- if g, e := string(m["foo"].Value), "fooval"; g != e {
- t.Errorf("GetMulti: foo: got %q, want %q", g, e)
- }
- if g, e := string(m["bar"].Value), "barval"; g != e {
- t.Errorf("GetMulti: bar: got %q, want %q", g, e)
- }
-
- // Delete
- err = c.Delete("foo")
- checkErr(err, "Delete: %v", err)
- it, err = c.Get("foo")
- if err != ErrCacheMiss {
- t.Errorf("post-Delete want ErrCacheMiss, got %v", err)
- }
-
- // Incr/Decr
- mustSet(&Item{Key: "num", Value: []byte("42")})
- n, err := c.Increment("num", 8)
- checkErr(err, "Increment num + 8: %v", err)
- if n != 50 {
- t.Fatalf("Increment num + 8: want=50, got=%d", n)
- }
- n, err = c.Decrement("num", 49)
- checkErr(err, "Decrement: %v", err)
- if n != 1 {
- t.Fatalf("Decrement 49: want=1, got=%d", n)
- }
- err = c.Delete("num")
- checkErr(err, "delete num: %v", err)
- n, err = c.Increment("num", 1)
- if err != ErrCacheMiss {
- t.Fatalf("increment post-delete: want ErrCacheMiss, got %v", err)
- }
- mustSet(&Item{Key: "num", Value: []byte("not-numeric")})
- n, err = c.Increment("num", 1)
- if err == nil || !strings.Contains(err.Error(), "client error") {
- t.Fatalf("increment non-number: want client error, got %v", err)
- }
- testTouchWithClient(t, c)
-
- // Test Delete All
- err = c.DeleteAll()
- checkErr(err, "DeleteAll: %v", err)
- it, err = c.Get("bar")
- if err != ErrCacheMiss {
- t.Errorf("post-DeleteAll want ErrCacheMiss, got %v", err)
- }
-
-}
-
-func testTouchWithClient(t *testing.T, c *Client) {
- if testing.Short() {
- t.Log("Skipping testing memcache Touch with testing in Short mode")
- return
- }
-
- mustSet := mustSetF(t, c)
-
- const secondsToExpiry = int32(2)
-
- // We will set foo and bar to expire in 2 seconds, then we'll keep touching
- // foo every second
- // After 3 seconds, we expect foo to be available, and bar to be expired
- foo := &Item{Key: "foo", Value: []byte("fooval"), Expiration: secondsToExpiry}
- bar := &Item{Key: "bar", Value: []byte("barval"), Expiration: secondsToExpiry}
-
- setTime := time.Now()
- mustSet(foo)
- mustSet(bar)
-
- for s := 0; s < 3; s++ {
- time.Sleep(time.Duration(1 * time.Second))
- err := c.Touch(foo.Key, secondsToExpiry)
- if nil != err {
- t.Errorf("error touching foo: %v", err.Error())
- }
- }
-
- _, err := c.Get("foo")
- if err != nil {
- if err == ErrCacheMiss {
- t.Fatalf("touching failed to keep item foo alive")
- } else {
- t.Fatalf("unexpected error retrieving foo after touching: %v", err.Error())
- }
- }
-
- _, err = c.Get("bar")
- if nil == err {
- t.Fatalf("item bar did not expire within %v seconds", time.Now().Sub(setTime).Seconds())
- } else {
- if err != ErrCacheMiss {
- t.Fatalf("unexpected error retrieving bar: %v", err.Error())
- }
- }
-}
diff --git a/vendor/src/github.com/bradfitz/gomemcache/memcache/selector.go b/vendor/src/github.com/bradfitz/gomemcache/memcache/selector.go
deleted file mode 100644
index 10b04d3..0000000
--- a/vendor/src/github.com/bradfitz/gomemcache/memcache/selector.go
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
-Copyright 2011 Google Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package memcache
-
-import (
- "hash/crc32"
- "net"
- "strings"
- "sync"
-)
-
-// ServerSelector is the interface that selects a memcache server
-// as a function of the item's key.
-//
-// All ServerSelector implementations must be safe for concurrent use
-// by multiple goroutines.
-type ServerSelector interface {
- // PickServer returns the server address that a given item
- // should be shared onto.
- PickServer(key string) (net.Addr, error)
- Each(func(net.Addr) error) error
-}
-
-// ServerList is a simple ServerSelector. Its zero value is usable.
-type ServerList struct {
- mu sync.RWMutex
- addrs []net.Addr
-}
-
-// SetServers changes a ServerList's set of servers at runtime and is
-// safe for concurrent use by multiple goroutines.
-//
-// Each server is given equal weight. A server is given more weight
-// if it's listed multiple times.
-//
-// SetServers returns an error if any of the server names fail to
-// resolve. No attempt is made to connect to the server. If any error
-// is returned, no changes are made to the ServerList.
-func (ss *ServerList) SetServers(servers ...string) error {
- naddr := make([]net.Addr, len(servers))
- for i, server := range servers {
- if strings.Contains(server, "/") {
- addr, err := net.ResolveUnixAddr("unix", server)
- if err != nil {
- return err
- }
- naddr[i] = addr
- } else {
- tcpaddr, err := net.ResolveTCPAddr("tcp", server)
- if err != nil {
- return err
- }
- naddr[i] = tcpaddr
- }
- }
-
- ss.mu.Lock()
- defer ss.mu.Unlock()
- ss.addrs = naddr
- return nil
-}
-
-// Each iterates over each server calling the given function
-func (ss *ServerList) Each(f func(net.Addr) error) error {
- ss.mu.RLock()
- defer ss.mu.RUnlock()
- for _, a := range ss.addrs {
- if err := f(a); nil != err {
- return err
- }
- }
- return nil
-}
-
-// keyBufPool returns []byte buffers for use by PickServer's call to
-// crc32.ChecksumIEEE to avoid allocations. (but doesn't avoid the
-// copies, which at least are bounded in size and small)
-var keyBufPool = sync.Pool{
- New: func() interface{} {
- b := make([]byte, 256)
- return &b
- },
-}
-
-func (ss *ServerList) PickServer(key string) (net.Addr, error) {
- ss.mu.RLock()
- defer ss.mu.RUnlock()
- if len(ss.addrs) == 0 {
- return nil, ErrNoServers
- }
- if len(ss.addrs) == 1 {
- return ss.addrs[0], nil
- }
- bufp := keyBufPool.Get().(*[]byte)
- n := copy(*bufp, key)
- cs := crc32.ChecksumIEEE((*bufp)[:n])
- keyBufPool.Put(bufp)
-
- return ss.addrs[cs%uint32(len(ss.addrs))], nil
-}
diff --git a/vendor/src/github.com/bradfitz/gomemcache/memcache/selector_test.go b/vendor/src/github.com/bradfitz/gomemcache/memcache/selector_test.go
deleted file mode 100644
index 65a2c4d..0000000
--- a/vendor/src/github.com/bradfitz/gomemcache/memcache/selector_test.go
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
-Copyright 2014 Google Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package memcache
-
-import "testing"
-
-func BenchmarkPickServer(b *testing.B) {
- // at least two to avoid 0 and 1 special cases:
- benchPickServer(b, "127.0.0.1:1234", "127.0.0.1:1235")
-}
-
-func BenchmarkPickServer_Single(b *testing.B) {
- benchPickServer(b, "127.0.0.1:1234")
-}
-
-func benchPickServer(b *testing.B, servers ...string) {
- b.ReportAllocs()
- var ss ServerList
- ss.SetServers(servers...)
- for i := 0; i < b.N; i++ {
- if _, err := ss.PickServer("some key"); err != nil {
- b.Fatal(err)
- }
- }
-}
diff --git a/vendor/src/github.com/cenkalti/backoff/LICENSE b/vendor/src/github.com/cenkalti/backoff/LICENSE
deleted file mode 100644
index 89b8179..0000000
--- a/vendor/src/github.com/cenkalti/backoff/LICENSE
+++ /dev/null
@@ -1,20 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2014 Cenk Altı
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/src/github.com/cenkalti/backoff/README.md b/vendor/src/github.com/cenkalti/backoff/README.md
deleted file mode 100644
index 0338a31..0000000
--- a/vendor/src/github.com/cenkalti/backoff/README.md
+++ /dev/null
@@ -1,118 +0,0 @@
-# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Build Status][travis image]][travis] [![Coverage Status][coveralls image]][coveralls]
-
-This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client].
-
-[Exponential backoff][exponential backoff wiki]
-is an algorithm that uses feedback to multiplicatively decrease the rate of some process,
-in order to gradually find an acceptable rate.
-The retries exponentially increase and stop increasing when a certain threshold is met.
-
-## How To
-
-We define two functions, `Retry()` and `RetryNotify()`.
-They receive an `Operation` to execute, a `BackOff` algorithm,
-and an optional `Notify` error handler.
-
-The operation will be executed, and will be retried on failure with delay
-as given by the backoff algorithm. The backoff algorithm can also decide when to stop
-retrying.
-In addition, the notify error handler will be called after each failed attempt,
-except for the last time, whose error should be handled by the caller.
-
-```go
-// An Operation is executing by Retry() or RetryNotify().
-// The operation will be retried using a backoff policy if it returns an error.
-type Operation func() error
-
-// Notify is a notify-on-error function. It receives an operation error and
-// backoff delay if the operation failed (with an error).
-//
-// NOTE that if the backoff policy stated to stop retrying,
-// the notify function isn't called.
-type Notify func(error, time.Duration)
-
-func Retry(Operation, BackOff) error
-func RetryNotify(Operation, BackOff, Notify)
-```
-
-## Examples
-
-### Retry
-
-Simple retry helper that uses the default exponential backoff algorithm:
-
-```go
-operation := func() error {
- // An operation that might fail.
- return nil // or return errors.New("some error")
-}
-
-err := Retry(operation, NewExponentialBackOff())
-if err != nil {
- // Handle error.
- return err
-}
-
-// Operation is successful.
-return nil
-```
-
-### Ticker
-
-Ticker is for using backoff algorithms with channels.
-
-```go
-operation := func() error {
- // An operation that might fail
- return nil // or return errors.New("some error")
-}
-
-b := NewExponentialBackOff()
-ticker := NewTicker(b)
-
-var err error
-
-// Ticks will continue to arrive when the previous operation is still running,
-// so operations that take a while to fail could run in quick succession.
-for range ticker.C {
- if err = operation(); err != nil {
- log.Println(err, "will retry...")
- continue
- }
-
- ticker.Stop()
- break
-}
-
-if err != nil {
- // Operation has failed.
- return err
-}
-
-// Operation is successful.
-return nil
-```
-
-## Getting Started
-
-```bash
-# install
-$ go get github.com/cenk/backoff
-
-# test
-$ cd $GOPATH/src/github.com/cenk/backoff
-$ go get -t ./...
-$ go test -v -cover
-```
-
-[godoc]: https://godoc.org/github.com/cenk/backoff
-[godoc image]: https://godoc.org/github.com/cenk/backoff?status.png
-[travis]: https://travis-ci.org/cenk/backoff
-[travis image]: https://travis-ci.org/cenk/backoff.png?branch=master
-[coveralls]: https://coveralls.io/github/cenk/backoff?branch=master
-[coveralls image]: https://coveralls.io/repos/github/cenk/backoff/badge.svg?branch=master
-
-[google-http-java-client]: https://github.com/google/google-http-java-client
-[exponential backoff wiki]: http://en.wikipedia.org/wiki/Exponential_backoff
-
-[advanced example]: https://godoc.org/github.com/cenk/backoff#example_
diff --git a/vendor/src/github.com/cenkalti/backoff/backoff.go b/vendor/src/github.com/cenkalti/backoff/backoff.go
deleted file mode 100644
index 61bd6df..0000000
--- a/vendor/src/github.com/cenkalti/backoff/backoff.go
+++ /dev/null
@@ -1,59 +0,0 @@
-// Package backoff implements backoff algorithms for retrying operations.
-//
-// Also has a Retry() helper for retrying operations that may fail.
-package backoff
-
-import "time"
-
-// BackOff is a backoff policy for retrying an operation.
-type BackOff interface {
- // NextBackOff returns the duration to wait before retrying the operation,
- // or backoff.Stop to indicate that no more retries should be made.
- //
- // Example usage:
- //
- // duration := backoff.NextBackOff();
- // if (duration == backoff.Stop) {
- // // Do not retry operation.
- // } else {
- // // Sleep for duration and retry operation.
- // }
- //
- NextBackOff() time.Duration
-
- // Reset to initial state.
- Reset()
-}
-
-// Indicates that no more retries should be made for use in NextBackOff().
-const Stop time.Duration = -1
-
-// ZeroBackOff is a fixed backoff policy whose backoff time is always zero,
-// meaning that the operation is retried immediately without waiting, indefinitely.
-type ZeroBackOff struct{}
-
-func (b *ZeroBackOff) Reset() {}
-
-func (b *ZeroBackOff) NextBackOff() time.Duration { return 0 }
-
-// StopBackOff is a fixed backoff policy that always returns backoff.Stop for
-// NextBackOff(), meaning that the operation should never be retried.
-type StopBackOff struct{}
-
-func (b *StopBackOff) Reset() {}
-
-func (b *StopBackOff) NextBackOff() time.Duration { return Stop }
-
-// ConstantBackOff is a backoff policy that always returns the same backoff delay.
-// This is in contrast to an exponential backoff policy,
-// which returns a delay that grows longer as you call NextBackOff() over and over again.
-type ConstantBackOff struct {
- Interval time.Duration
-}
-
-func (b *ConstantBackOff) Reset() {}
-func (b *ConstantBackOff) NextBackOff() time.Duration { return b.Interval }
-
-func NewConstantBackOff(d time.Duration) *ConstantBackOff {
- return &ConstantBackOff{Interval: d}
-}
diff --git a/vendor/src/github.com/cenkalti/backoff/backoff_test.go b/vendor/src/github.com/cenkalti/backoff/backoff_test.go
deleted file mode 100644
index 91f27c4..0000000
--- a/vendor/src/github.com/cenkalti/backoff/backoff_test.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package backoff
-
-import (
- "testing"
- "time"
-)
-
-func TestNextBackOffMillis(t *testing.T) {
- subtestNextBackOff(t, 0, new(ZeroBackOff))
- subtestNextBackOff(t, Stop, new(StopBackOff))
-}
-
-func subtestNextBackOff(t *testing.T, expectedValue time.Duration, backOffPolicy BackOff) {
- for i := 0; i < 10; i++ {
- next := backOffPolicy.NextBackOff()
- if next != expectedValue {
- t.Errorf("got: %d expected: %d", next, expectedValue)
- }
- }
-}
-
-func TestConstantBackOff(t *testing.T) {
- backoff := NewConstantBackOff(time.Second)
- if backoff.NextBackOff() != time.Second {
- t.Error("invalid interval")
- }
-}
diff --git a/vendor/src/github.com/cenkalti/backoff/example_test.go b/vendor/src/github.com/cenkalti/backoff/example_test.go
deleted file mode 100644
index 0d1852e..0000000
--- a/vendor/src/github.com/cenkalti/backoff/example_test.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package backoff
-
-import "log"
-
-func ExampleRetry() error {
- operation := func() error {
- // An operation that might fail.
- return nil // or return errors.New("some error")
- }
-
- err := Retry(operation, NewExponentialBackOff())
- if err != nil {
- // Handle error.
- return err
- }
-
- // Operation is successful.
- return nil
-}
-
-func ExampleTicker() error {
- operation := func() error {
- // An operation that might fail
- return nil // or return errors.New("some error")
- }
-
- b := NewExponentialBackOff()
- ticker := NewTicker(b)
-
- var err error
-
- // Ticks will continue to arrive when the previous operation is still running,
- // so operations that take a while to fail could run in quick succession.
- for _ = range ticker.C {
- if err = operation(); err != nil {
- log.Println(err, "will retry...")
- continue
- }
-
- ticker.Stop()
- break
- }
-
- if err != nil {
- // Operation has failed.
- return err
- }
-
- // Operation is successful.
- return nil
-}
diff --git a/vendor/src/github.com/cenkalti/backoff/exponential.go b/vendor/src/github.com/cenkalti/backoff/exponential.go
deleted file mode 100644
index ae65516..0000000
--- a/vendor/src/github.com/cenkalti/backoff/exponential.go
+++ /dev/null
@@ -1,156 +0,0 @@
-package backoff
-
-import (
- "math/rand"
- "time"
-)
-
-/*
-ExponentialBackOff is a backoff implementation that increases the backoff
-period for each retry attempt using a randomization function that grows exponentially.
-
-NextBackOff() is calculated using the following formula:
-
- randomized interval =
- RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor])
-
-In other words NextBackOff() will range between the randomization factor
-percentage below and above the retry interval.
-
-For example, given the following parameters:
-
- RetryInterval = 2
- RandomizationFactor = 0.5
- Multiplier = 2
-
-the actual backoff period used in the next retry attempt will range between 1 and 3 seconds,
-multiplied by the exponential, that is, between 2 and 6 seconds.
-
-Note: MaxInterval caps the RetryInterval and not the randomized interval.
-
-If the time elapsed since an ExponentialBackOff instance is created goes past the
-MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop.
-
-The elapsed time can be reset by calling Reset().
-
-Example: Given the following default arguments, for 10 tries the sequence will be,
-and assuming we go over the MaxElapsedTime on the 10th try:
-
- Request # RetryInterval (seconds) Randomized Interval (seconds)
-
- 1 0.5 [0.25, 0.75]
- 2 0.75 [0.375, 1.125]
- 3 1.125 [0.562, 1.687]
- 4 1.687 [0.8435, 2.53]
- 5 2.53 [1.265, 3.795]
- 6 3.795 [1.897, 5.692]
- 7 5.692 [2.846, 8.538]
- 8 8.538 [4.269, 12.807]
- 9 12.807 [6.403, 19.210]
- 10 19.210 backoff.Stop
-
-Note: Implementation is not thread-safe.
-*/
-type ExponentialBackOff struct {
- InitialInterval time.Duration
- RandomizationFactor float64
- Multiplier float64
- MaxInterval time.Duration
- // After MaxElapsedTime the ExponentialBackOff stops.
- // It never stops if MaxElapsedTime == 0.
- MaxElapsedTime time.Duration
- Clock Clock
-
- currentInterval time.Duration
- startTime time.Time
-}
-
-// Clock is an interface that returns current time for BackOff.
-type Clock interface {
- Now() time.Time
-}
-
-// Default values for ExponentialBackOff.
-const (
- DefaultInitialInterval = 500 * time.Millisecond
- DefaultRandomizationFactor = 0.5
- DefaultMultiplier = 1.5
- DefaultMaxInterval = 60 * time.Second
- DefaultMaxElapsedTime = 15 * time.Minute
-)
-
-// NewExponentialBackOff creates an instance of ExponentialBackOff using default values.
-func NewExponentialBackOff() *ExponentialBackOff {
- b := &ExponentialBackOff{
- InitialInterval: DefaultInitialInterval,
- RandomizationFactor: DefaultRandomizationFactor,
- Multiplier: DefaultMultiplier,
- MaxInterval: DefaultMaxInterval,
- MaxElapsedTime: DefaultMaxElapsedTime,
- Clock: SystemClock,
- }
- if b.RandomizationFactor < 0 {
- b.RandomizationFactor = 0
- } else if b.RandomizationFactor > 1 {
- b.RandomizationFactor = 1
- }
- b.Reset()
- return b
-}
-
-type systemClock struct{}
-
-func (t systemClock) Now() time.Time {
- return time.Now()
-}
-
-// SystemClock implements Clock interface that uses time.Now().
-var SystemClock = systemClock{}
-
-// Reset the interval back to the initial retry interval and restarts the timer.
-func (b *ExponentialBackOff) Reset() {
- b.currentInterval = b.InitialInterval
- b.startTime = b.Clock.Now()
-}
-
-// NextBackOff calculates the next backoff interval using the formula:
-// Randomized interval = RetryInterval +/- (RandomizationFactor * RetryInterval)
-func (b *ExponentialBackOff) NextBackOff() time.Duration {
- // Make sure we have not gone over the maximum elapsed time.
- if b.MaxElapsedTime != 0 && b.GetElapsedTime() > b.MaxElapsedTime {
- return Stop
- }
- defer b.incrementCurrentInterval()
- return getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval)
-}
-
-// GetElapsedTime returns the elapsed time since an ExponentialBackOff instance
-// is created and is reset when Reset() is called.
-//
-// The elapsed time is computed using time.Now().UnixNano().
-func (b *ExponentialBackOff) GetElapsedTime() time.Duration {
- return b.Clock.Now().Sub(b.startTime)
-}
-
-// Increments the current interval by multiplying it with the multiplier.
-func (b *ExponentialBackOff) incrementCurrentInterval() {
- // Check for overflow, if overflow is detected set the current interval to the max interval.
- if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier {
- b.currentInterval = b.MaxInterval
- } else {
- b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier)
- }
-}
-
-// Returns a random value from the following interval:
-// [randomizationFactor * currentInterval, randomizationFactor * currentInterval].
-func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration {
- var delta = randomizationFactor * float64(currentInterval)
- var minInterval = float64(currentInterval) - delta
- var maxInterval = float64(currentInterval) + delta
-
- // Get a random value from the range [minInterval, maxInterval].
- // The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then
- // we want a 33% chance for selecting either 1, 2 or 3.
- return time.Duration(minInterval + (random * (maxInterval - minInterval + 1)))
-}
diff --git a/vendor/src/github.com/cenkalti/backoff/exponential_test.go b/vendor/src/github.com/cenkalti/backoff/exponential_test.go
deleted file mode 100644
index 11b95e4..0000000
--- a/vendor/src/github.com/cenkalti/backoff/exponential_test.go
+++ /dev/null
@@ -1,108 +0,0 @@
-package backoff
-
-import (
- "math"
- "testing"
- "time"
-)
-
-func TestBackOff(t *testing.T) {
- var (
- testInitialInterval = 500 * time.Millisecond
- testRandomizationFactor = 0.1
- testMultiplier = 2.0
- testMaxInterval = 5 * time.Second
- testMaxElapsedTime = 15 * time.Minute
- )
-
- exp := NewExponentialBackOff()
- exp.InitialInterval = testInitialInterval
- exp.RandomizationFactor = testRandomizationFactor
- exp.Multiplier = testMultiplier
- exp.MaxInterval = testMaxInterval
- exp.MaxElapsedTime = testMaxElapsedTime
- exp.Reset()
-
- var expectedResults = []time.Duration{500, 1000, 2000, 4000, 5000, 5000, 5000, 5000, 5000, 5000}
- for i, d := range expectedResults {
- expectedResults[i] = d * time.Millisecond
- }
-
- for _, expected := range expectedResults {
- assertEquals(t, expected, exp.currentInterval)
- // Assert that the next backoff falls in the expected range.
- var minInterval = expected - time.Duration(testRandomizationFactor*float64(expected))
- var maxInterval = expected + time.Duration(testRandomizationFactor*float64(expected))
- var actualInterval = exp.NextBackOff()
- if !(minInterval <= actualInterval && actualInterval <= maxInterval) {
- t.Error("error")
- }
- }
-}
-
-func TestGetRandomizedInterval(t *testing.T) {
- // 33% chance of being 1.
- assertEquals(t, 1, getRandomValueFromInterval(0.5, 0, 2))
- assertEquals(t, 1, getRandomValueFromInterval(0.5, 0.33, 2))
- // 33% chance of being 2.
- assertEquals(t, 2, getRandomValueFromInterval(0.5, 0.34, 2))
- assertEquals(t, 2, getRandomValueFromInterval(0.5, 0.66, 2))
- // 33% chance of being 3.
- assertEquals(t, 3, getRandomValueFromInterval(0.5, 0.67, 2))
- assertEquals(t, 3, getRandomValueFromInterval(0.5, 0.99, 2))
-}
-
-type TestClock struct {
- i time.Duration
- start time.Time
-}
-
-func (c *TestClock) Now() time.Time {
- t := c.start.Add(c.i)
- c.i += time.Second
- return t
-}
-
-func TestGetElapsedTime(t *testing.T) {
- var exp = NewExponentialBackOff()
- exp.Clock = &TestClock{}
- exp.Reset()
-
- var elapsedTime = exp.GetElapsedTime()
- if elapsedTime != time.Second {
- t.Errorf("elapsedTime=%d", elapsedTime)
- }
-}
-
-func TestMaxElapsedTime(t *testing.T) {
- var exp = NewExponentialBackOff()
- exp.Clock = &TestClock{start: time.Time{}.Add(10000 * time.Second)}
- // Change the currentElapsedTime to be 0 ensuring that the elapsed time will be greater
- // than the max elapsed time.
- exp.startTime = time.Time{}
- assertEquals(t, Stop, exp.NextBackOff())
-}
-
-func TestBackOffOverflow(t *testing.T) {
- var (
- testInitialInterval time.Duration = math.MaxInt64 / 2
- testMaxInterval time.Duration = math.MaxInt64
- testMultiplier = 2.1
- )
-
- exp := NewExponentialBackOff()
- exp.InitialInterval = testInitialInterval
- exp.Multiplier = testMultiplier
- exp.MaxInterval = testMaxInterval
- exp.Reset()
-
- exp.NextBackOff()
- // Assert that when an overflow is possible the current varerval time.Duration is set to the max varerval time.Duration .
- assertEquals(t, testMaxInterval, exp.currentInterval)
-}
-
-func assertEquals(t *testing.T, expected, value time.Duration) {
- if expected != value {
- t.Errorf("got: %d, expected: %d", value, expected)
- }
-}
diff --git a/vendor/src/github.com/cenkalti/backoff/retry.go b/vendor/src/github.com/cenkalti/backoff/retry.go
deleted file mode 100644
index 6bc88ce..0000000
--- a/vendor/src/github.com/cenkalti/backoff/retry.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package backoff
-
-import "time"
-
-// An Operation is executing by Retry() or RetryNotify().
-// The operation will be retried using a backoff policy if it returns an error.
-type Operation func() error
-
-// Notify is a notify-on-error function. It receives an operation error and
-// backoff delay if the operation failed (with an error).
-//
-// NOTE that if the backoff policy stated to stop retrying,
-// the notify function isn't called.
-type Notify func(error, time.Duration)
-
-// Retry the operation o until it does not return error or BackOff stops.
-// o is guaranteed to be run at least once.
-// It is the caller's responsibility to reset b after Retry returns.
-//
-// Retry sleeps the goroutine for the duration returned by BackOff after a
-// failed operation returns.
-func Retry(o Operation, b BackOff) error { return RetryNotify(o, b, nil) }
-
-// RetryNotify calls notify function with the error and wait duration
-// for each failed attempt before sleep.
-func RetryNotify(operation Operation, b BackOff, notify Notify) error {
- var err error
- var next time.Duration
-
- b.Reset()
- for {
- if err = operation(); err == nil {
- return nil
- }
-
- if next = b.NextBackOff(); next == Stop {
- return err
- }
-
- if notify != nil {
- notify(err, next)
- }
-
- time.Sleep(next)
- }
-}
diff --git a/vendor/src/github.com/cenkalti/backoff/retry_test.go b/vendor/src/github.com/cenkalti/backoff/retry_test.go
deleted file mode 100644
index 4d54b9a..0000000
--- a/vendor/src/github.com/cenkalti/backoff/retry_test.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package backoff
-
-import (
- "errors"
- "log"
- "testing"
-)
-
-func TestRetry(t *testing.T) {
- const successOn = 3
- var i = 0
-
- // This function is successful on "successOn" calls.
- f := func() error {
- i++
- log.Printf("function is called %d. time\n", i)
-
- if i == successOn {
- log.Println("OK")
- return nil
- }
-
- log.Println("error")
- return errors.New("error")
- }
-
- err := Retry(f, NewExponentialBackOff())
- if err != nil {
- t.Errorf("unexpected error: %s", err.Error())
- }
- if i != successOn {
- t.Errorf("invalid number of retries: %d", i)
- }
-}
diff --git a/vendor/src/github.com/cenkalti/backoff/ticker.go b/vendor/src/github.com/cenkalti/backoff/ticker.go
deleted file mode 100644
index 7a5ff4e..0000000
--- a/vendor/src/github.com/cenkalti/backoff/ticker.go
+++ /dev/null
@@ -1,79 +0,0 @@
-package backoff
-
-import (
- "runtime"
- "sync"
- "time"
-)
-
-// Ticker holds a channel that delivers `ticks' of a clock at times reported by a BackOff.
-//
-// Ticks will continue to arrive when the previous operation is still running,
-// so operations that take a while to fail could run in quick succession.
-type Ticker struct {
- C <-chan time.Time
- c chan time.Time
- b BackOff
- stop chan struct{}
- stopOnce sync.Once
-}
-
-// NewTicker returns a new Ticker containing a channel that will send the time at times
-// specified by the BackOff argument. Ticker is guaranteed to tick at least once.
-// The channel is closed when Stop method is called or BackOff stops.
-func NewTicker(b BackOff) *Ticker {
- c := make(chan time.Time)
- t := &Ticker{
- C: c,
- c: c,
- b: b,
- stop: make(chan struct{}),
- }
- go t.run()
- runtime.SetFinalizer(t, (*Ticker).Stop)
- return t
-}
-
-// Stop turns off a ticker. After Stop, no more ticks will be sent.
-func (t *Ticker) Stop() {
- t.stopOnce.Do(func() { close(t.stop) })
-}
-
-func (t *Ticker) run() {
- c := t.c
- defer close(c)
- t.b.Reset()
-
- // Ticker is guaranteed to tick at least once.
- afterC := t.send(time.Now())
-
- for {
- if afterC == nil {
- return
- }
-
- select {
- case tick := <-afterC:
- afterC = t.send(tick)
- case <-t.stop:
- t.c = nil // Prevent future ticks from being sent to the channel.
- return
- }
- }
-}
-
-func (t *Ticker) send(tick time.Time) <-chan time.Time {
- select {
- case t.c <- tick:
- case <-t.stop:
- return nil
- }
-
- next := t.b.NextBackOff()
- if next == Stop {
- t.Stop()
- return nil
- }
-
- return time.After(next)
-}
diff --git a/vendor/src/github.com/cenkalti/backoff/ticker_test.go b/vendor/src/github.com/cenkalti/backoff/ticker_test.go
deleted file mode 100644
index 562718c..0000000
--- a/vendor/src/github.com/cenkalti/backoff/ticker_test.go
+++ /dev/null
@@ -1,45 +0,0 @@
-package backoff
-
-import (
- "errors"
- "log"
- "testing"
-)
-
-func TestTicker(t *testing.T) {
- const successOn = 3
- var i = 0
-
- // This function is successful on "successOn" calls.
- f := func() error {
- i++
- log.Printf("function is called %d. time\n", i)
-
- if i == successOn {
- log.Println("OK")
- return nil
- }
-
- log.Println("error")
- return errors.New("error")
- }
-
- b := NewExponentialBackOff()
- ticker := NewTicker(b)
-
- var err error
- for _ = range ticker.C {
- if err = f(); err != nil {
- t.Log(err)
- continue
- }
-
- break
- }
- if err != nil {
- t.Errorf("unexpected error: %s", err.Error())
- }
- if i != successOn {
- t.Errorf("invalid number of retries: %d", i)
- }
-}
diff --git a/vendor/src/github.com/codegangsta/cli/CHANGELOG.md b/vendor/src/github.com/codegangsta/cli/CHANGELOG.md
deleted file mode 100644
index 8b0d0ee..0000000
--- a/vendor/src/github.com/codegangsta/cli/CHANGELOG.md
+++ /dev/null
@@ -1,336 +0,0 @@
-# Change Log
-
-**ATTN**: This project uses [semantic versioning](http://semver.org/).
-
-## [Unreleased]
-### Added
-- Flag type code generation via `go generate`
-- Write to stderr and exit 1 if action returns non-nil error
-- Added support for TOML to the `altsrc` loader
-
-### Changed
-- Raise minimum tested/supported Go version to 1.2+
-
-## [1.18.0] - 2016-06-27
-### Added
-- `./runtests` test runner with coverage tracking by default
-- testing on OS X
-- testing on Windows
-- `UintFlag`, `Uint64Flag`, and `Int64Flag` types and supporting code
-
-### Changed
-- Use spaces for alignment in help/usage output instead of tabs, making the
- output alignment consistent regardless of tab width
-
-### Fixed
-- Printing of command aliases in help text
-- Printing of visible flags for both struct and struct pointer flags
-- Display the `help` subcommand when using `CommandCategories`
-- No longer swallows `panic`s that occur within the `Action`s themselves when
- detecting the signature of the `Action` field
-
-## [1.17.0] - 2016-05-09
-### Added
-- Pluggable flag-level help text rendering via `cli.DefaultFlagStringFunc`
-- `context.GlobalBoolT` was added as an analogue to `context.GlobalBool`
-- Support for hiding commands by setting `Hidden: true` -- this will hide the
- commands in help output
-
-### Changed
-- `Float64Flag`, `IntFlag`, and `DurationFlag` default values are no longer
- quoted in help text output.
-- All flag types now include `(default: {value})` strings following usage when a
- default value can be (reasonably) detected.
-- `IntSliceFlag` and `StringSliceFlag` usage strings are now more consistent
- with non-slice flag types
-- Apps now exit with a code of 3 if an unknown subcommand is specified
- (previously they printed "No help topic for...", but still exited 0. This
- makes it easier to script around apps built using `cli` since they can trust
- that a 0 exit code indicated a successful execution.
-- cleanups based on [Go Report Card
- feedback](https://goreportcard.com/report/github.com/urfave/cli)
-
-## [1.16.0] - 2016-05-02
-### Added
-- `Hidden` field on all flag struct types to omit from generated help text
-
-### Changed
-- `BashCompletionFlag` (`--enable-bash-completion`) is now omitted from
-generated help text via the `Hidden` field
-
-### Fixed
-- handling of error values in `HandleAction` and `HandleExitCoder`
-
-## [1.15.0] - 2016-04-30
-### Added
-- This file!
-- Support for placeholders in flag usage strings
-- `App.Metadata` map for arbitrary data/state management
-- `Set` and `GlobalSet` methods on `*cli.Context` for altering values after
-parsing.
-- Support for nested lookup of dot-delimited keys in structures loaded from
-YAML.
-
-### Changed
-- The `App.Action` and `Command.Action` now prefer a return signature of
-`func(*cli.Context) error`, as defined by `cli.ActionFunc`. If a non-nil
-`error` is returned, there may be two outcomes:
- - If the error fulfills `cli.ExitCoder`, then `os.Exit` will be called
- automatically
- - Else the error is bubbled up and returned from `App.Run`
-- Specifying an `Action` with the legacy return signature of
-`func(*cli.Context)` will produce a deprecation message to stderr
-- Specifying an `Action` that is not a `func` type will produce a non-zero exit
-from `App.Run`
-- Specifying an `Action` func that has an invalid (input) signature will
-produce a non-zero exit from `App.Run`
-
-### Deprecated
--
-`cli.App.RunAndExitOnError`, which should now be done by returning an error
-that fulfills `cli.ExitCoder` to `cli.App.Run`.
-- the legacy signature for
-`cli.App.Action` of `func(*cli.Context)`, which should now have a return
-signature of `func(*cli.Context) error`, as defined by `cli.ActionFunc`.
-
-### Fixed
-- Added missing `*cli.Context.GlobalFloat64` method
-
-## [1.14.0] - 2016-04-03 (backfilled 2016-04-25)
-### Added
-- Codebeat badge
-- Support for categorization via `CategorizedHelp` and `Categories` on app.
-
-### Changed
-- Use `filepath.Base` instead of `path.Base` in `Name` and `HelpName`.
-
-### Fixed
-- Ensure version is not shown in help text when `HideVersion` set.
-
-## [1.13.0] - 2016-03-06 (backfilled 2016-04-25)
-### Added
-- YAML file input support.
-- `NArg` method on context.
-
-## [1.12.0] - 2016-02-17 (backfilled 2016-04-25)
-### Added
-- Custom usage error handling.
-- Custom text support in `USAGE` section of help output.
-- Improved help messages for empty strings.
-- AppVeyor CI configuration.
-
-### Changed
-- Removed `panic` from default help printer func.
-- De-duping and optimizations.
-
-### Fixed
-- Correctly handle `Before`/`After` at command level when no subcommands.
-- Case of literal `-` argument causing flag reordering.
-- Environment variable hints on Windows.
-- Docs updates.
-
-## [1.11.1] - 2015-12-21 (backfilled 2016-04-25)
-### Changed
-- Use `path.Base` in `Name` and `HelpName`
-- Export `GetName` on flag types.
-
-### Fixed
-- Flag parsing when skipping is enabled.
-- Test output cleanup.
-- Move completion check to account for empty input case.
-
-## [1.11.0] - 2015-11-15 (backfilled 2016-04-25)
-### Added
-- Destination scan support for flags.
-- Testing against `tip` in Travis CI config.
-
-### Changed
-- Go version in Travis CI config.
-
-### Fixed
-- Removed redundant tests.
-- Use correct example naming in tests.
-
-## [1.10.2] - 2015-10-29 (backfilled 2016-04-25)
-### Fixed
-- Remove unused var in bash completion.
-
-## [1.10.1] - 2015-10-21 (backfilled 2016-04-25)
-### Added
-- Coverage and reference logos in README.
-
-### Fixed
-- Use specified values in help and version parsing.
-- Only display app version and help message once.
-
-## [1.10.0] - 2015-10-06 (backfilled 2016-04-25)
-### Added
-- More tests for existing functionality.
-- `ArgsUsage` at app and command level for help text flexibility.
-
-### Fixed
-- Honor `HideHelp` and `HideVersion` in `App.Run`.
-- Remove juvenile word from README.
-
-## [1.9.0] - 2015-09-08 (backfilled 2016-04-25)
-### Added
-- `FullName` on command with accompanying help output update.
-- Set default `$PROG` in bash completion.
-
-### Changed
-- Docs formatting.
-
-### Fixed
-- Removed self-referential imports in tests.
-
-## [1.8.0] - 2015-06-30 (backfilled 2016-04-25)
-### Added
-- Support for `Copyright` at app level.
-- `Parent` func at context level to walk up context lineage.
-
-### Fixed
-- Global flag processing at top level.
-
-## [1.7.1] - 2015-06-11 (backfilled 2016-04-25)
-### Added
-- Aggregate errors from `Before`/`After` funcs.
-- Doc comments on flag structs.
-- Include non-global flags when checking version and help.
-- Travis CI config updates.
-
-### Fixed
-- Ensure slice type flags have non-nil values.
-- Collect global flags from the full command hierarchy.
-- Docs prose.
-
-## [1.7.0] - 2015-05-03 (backfilled 2016-04-25)
-### Changed
-- `HelpPrinter` signature includes output writer.
-
-### Fixed
-- Specify go 1.1+ in docs.
-- Set `Writer` when running command as app.
-
-## [1.6.0] - 2015-03-23 (backfilled 2016-04-25)
-### Added
-- Multiple author support.
-- `NumFlags` at context level.
-- `Aliases` at command level.
-
-### Deprecated
-- `ShortName` at command level.
-
-### Fixed
-- Subcommand help output.
-- Backward compatible support for deprecated `Author` and `Email` fields.
-- Docs regarding `Names`/`Aliases`.
-
-## [1.5.0] - 2015-02-20 (backfilled 2016-04-25)
-### Added
-- `After` hook func support at app and command level.
-
-### Fixed
-- Use parsed context when running command as subcommand.
-- Docs prose.
-
-## [1.4.1] - 2015-01-09 (backfilled 2016-04-25)
-### Added
-- Support for hiding `-h / --help` flags, but not `help` subcommand.
-- Stop flag parsing after `--`.
-
-### Fixed
-- Help text for generic flags to specify single value.
-- Use double quotes in output for defaults.
-- Use `ParseInt` instead of `ParseUint` for int environment var values.
-- Use `0` as base when parsing int environment var values.
-
-## [1.4.0] - 2014-12-12 (backfilled 2016-04-25)
-### Added
-- Support for environment variable lookup "cascade".
-- Support for `Stdout` on app for output redirection.
-
-### Fixed
-- Print command help instead of app help in `ShowCommandHelp`.
-
-## [1.3.1] - 2014-11-13 (backfilled 2016-04-25)
-### Added
-- Docs and example code updates.
-
-### Changed
-- Default `-v / --version` flag made optional.
-
-## [1.3.0] - 2014-08-10 (backfilled 2016-04-25)
-### Added
-- `FlagNames` at context level.
-- Exposed `VersionPrinter` var for more control over version output.
-- Zsh completion hook.
-- `AUTHOR` section in default app help template.
-- Contribution guidelines.
-- `DurationFlag` type.
-
-## [1.2.0] - 2014-08-02
-### Added
-- Support for environment variable defaults on flags plus tests.
-
-## [1.1.0] - 2014-07-15
-### Added
-- Bash completion.
-- Optional hiding of built-in help command.
-- Optional skipping of flag parsing at command level.
-- `Author`, `Email`, and `Compiled` metadata on app.
-- `Before` hook func support at app and command level.
-- `CommandNotFound` func support at app level.
-- Command reference available on context.
-- `GenericFlag` type.
-- `Float64Flag` type.
-- `BoolTFlag` type.
-- `IsSet` flag helper on context.
-- More flag lookup funcs at context level.
-- More tests & docs.
-
-### Changed
-- Help template updates to account for presence/absence of flags.
-- Separated subcommand help template.
-- Exposed `HelpPrinter` var for more control over help output.
-
-## [1.0.0] - 2013-11-01
-### Added
-- `help` flag in default app flag set and each command flag set.
-- Custom handling of argument parsing errors.
-- Command lookup by name at app level.
-- `StringSliceFlag` type and supporting `StringSlice` type.
-- `IntSliceFlag` type and supporting `IntSlice` type.
-- Slice type flag lookups by name at context level.
-- Export of app and command help functions.
-- More tests & docs.
-
-## 0.1.0 - 2013-07-22
-### Added
-- Initial implementation.
-
-[Unreleased]: https://github.com/urfave/cli/compare/v1.18.0...HEAD
-[1.18.0]: https://github.com/urfave/cli/compare/v1.17.0...v1.18.0
-[1.17.0]: https://github.com/urfave/cli/compare/v1.16.0...v1.17.0
-[1.16.0]: https://github.com/urfave/cli/compare/v1.15.0...v1.16.0
-[1.15.0]: https://github.com/urfave/cli/compare/v1.14.0...v1.15.0
-[1.14.0]: https://github.com/urfave/cli/compare/v1.13.0...v1.14.0
-[1.13.0]: https://github.com/urfave/cli/compare/v1.12.0...v1.13.0
-[1.12.0]: https://github.com/urfave/cli/compare/v1.11.1...v1.12.0
-[1.11.1]: https://github.com/urfave/cli/compare/v1.11.0...v1.11.1
-[1.11.0]: https://github.com/urfave/cli/compare/v1.10.2...v1.11.0
-[1.10.2]: https://github.com/urfave/cli/compare/v1.10.1...v1.10.2
-[1.10.1]: https://github.com/urfave/cli/compare/v1.10.0...v1.10.1
-[1.10.0]: https://github.com/urfave/cli/compare/v1.9.0...v1.10.0
-[1.9.0]: https://github.com/urfave/cli/compare/v1.8.0...v1.9.0
-[1.8.0]: https://github.com/urfave/cli/compare/v1.7.1...v1.8.0
-[1.7.1]: https://github.com/urfave/cli/compare/v1.7.0...v1.7.1
-[1.7.0]: https://github.com/urfave/cli/compare/v1.6.0...v1.7.0
-[1.6.0]: https://github.com/urfave/cli/compare/v1.5.0...v1.6.0
-[1.5.0]: https://github.com/urfave/cli/compare/v1.4.1...v1.5.0
-[1.4.1]: https://github.com/urfave/cli/compare/v1.4.0...v1.4.1
-[1.4.0]: https://github.com/urfave/cli/compare/v1.3.1...v1.4.0
-[1.3.1]: https://github.com/urfave/cli/compare/v1.3.0...v1.3.1
-[1.3.0]: https://github.com/urfave/cli/compare/v1.2.0...v1.3.0
-[1.2.0]: https://github.com/urfave/cli/compare/v1.1.0...v1.2.0
-[1.1.0]: https://github.com/urfave/cli/compare/v1.0.0...v1.1.0
-[1.0.0]: https://github.com/urfave/cli/compare/v0.1.0...v1.0.0
diff --git a/vendor/src/github.com/codegangsta/cli/LICENSE b/vendor/src/github.com/codegangsta/cli/LICENSE
deleted file mode 100644
index 42a597e..0000000
--- a/vendor/src/github.com/codegangsta/cli/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-MIT License
-
-Copyright (c) 2016 Jeremy Saenz & Contributors
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/vendor/src/github.com/codegangsta/cli/README.md b/vendor/src/github.com/codegangsta/cli/README.md
deleted file mode 100644
index a1e537c..0000000
--- a/vendor/src/github.com/codegangsta/cli/README.md
+++ /dev/null
@@ -1,1313 +0,0 @@
-cli
-===
-
-[![Build Status](https://travis-ci.org/urfave/cli.svg?branch=master)](https://travis-ci.org/urfave/cli)
-[![Windows Build Status](https://ci.appveyor.com/api/projects/status/rtgk5xufi932pb2v?svg=true)](https://ci.appveyor.com/project/urfave/cli)
-[![GoDoc](https://godoc.org/github.com/urfave/cli?status.svg)](https://godoc.org/github.com/urfave/cli)
-[![codebeat](https://codebeat.co/badges/0a8f30aa-f975-404b-b878-5fab3ae1cc5f)](https://codebeat.co/projects/github-com-urfave-cli)
-[![Go Report Card](https://goreportcard.com/badge/urfave/cli)](https://goreportcard.com/report/urfave/cli)
-[![top level coverage](https://gocover.io/_badge/github.com/urfave/cli?0 "top level coverage")](http://gocover.io/github.com/urfave/cli) /
-[![altsrc coverage](https://gocover.io/_badge/github.com/urfave/cli/altsrc?0 "altsrc coverage")](http://gocover.io/github.com/urfave/cli/altsrc)
-
-**Notice:** This is the library formerly known as
-`github.com/codegangsta/cli` -- Github will automatically redirect requests
-to this repository, but we recommend updating your references for clarity.
-
-cli is a simple, fast, and fun package for building command line apps in Go. The
-goal is to enable developers to write fast and distributable command line
-applications in an expressive way.
-
-
-
-- [Overview](#overview)
-- [Installation](#installation)
- * [Supported platforms](#supported-platforms)
- * [Using the `v2` branch](#using-the-v2-branch)
- * [Pinning to the `v1` releases](#pinning-to-the-v1-releases)
-- [Getting Started](#getting-started)
-- [Examples](#examples)
- * [Arguments](#arguments)
- * [Flags](#flags)
- + [Placeholder Values](#placeholder-values)
- + [Alternate Names](#alternate-names)
- + [Values from the Environment](#values-from-the-environment)
- + [Values from alternate input sources (YAML, TOML, and others)](#values-from-alternate-input-sources-yaml-toml-and-others)
- * [Subcommands](#subcommands)
- * [Subcommands categories](#subcommands-categories)
- * [Exit code](#exit-code)
- * [Bash Completion](#bash-completion)
- + [Enabling](#enabling)
- + [Distribution](#distribution)
- + [Customization](#customization)
- * [Generated Help Text](#generated-help-text)
- + [Customization](#customization-1)
- * [Version Flag](#version-flag)
- + [Customization](#customization-2)
- + [Full API Example](#full-api-example)
-- [Contribution Guidelines](#contribution-guidelines)
-
-
-
-## Overview
-
-Command line apps are usually so tiny that there is absolutely no reason why
-your code should *not* be self-documenting. Things like generating help text and
-parsing command flags/options should not hinder productivity when writing a
-command line app.
-
-**This is where cli comes into play.** cli makes command line programming fun,
-organized, and expressive!
-
-## Installation
-
-Make sure you have a working Go environment. Go version 1.2+ is supported. [See
-the install instructions for Go](http://golang.org/doc/install.html).
-
-To install cli, simply run:
-```
-$ go get github.com/urfave/cli
-```
-
-Make sure your `PATH` includes the `$GOPATH/bin` directory so your commands can
-be easily used:
-```
-export PATH=$PATH:$GOPATH/bin
-```
-
-### Supported platforms
-
-cli is tested against multiple versions of Go on Linux, and against the latest
-released version of Go on OS X and Windows. For full details, see
-[`./.travis.yml`](./.travis.yml) and [`./appveyor.yml`](./appveyor.yml).
-
-### Using the `v2` branch
-
-**Warning**: The `v2` branch is currently unreleased and considered unstable.
-
-There is currently a long-lived branch named `v2` that is intended to land as
-the new `master` branch once development there has settled down. The current
-`master` branch (mirrored as `v1`) is being manually merged into `v2` on
-an irregular human-based schedule, but generally if one wants to "upgrade" to
-`v2` *now* and accept the volatility (read: "awesomeness") that comes along with
-that, please use whatever version pinning of your preference, such as via
-`gopkg.in`:
-
-```
-$ go get gopkg.in/urfave/cli.v2
-```
-
-``` go
-...
-import (
- "gopkg.in/urfave/cli.v2" // imports as package "cli"
-)
-...
-```
-
-### Pinning to the `v1` releases
-
-Similarly to the section above describing use of the `v2` branch, if one wants
-to avoid any unexpected compatibility pains once `v2` becomes `master`, then
-pinning to `v1` is an acceptable option, e.g.:
-
-```
-$ go get gopkg.in/urfave/cli.v1
-```
-
-``` go
-...
-import (
- "gopkg.in/urfave/cli.v1" // imports as package "cli"
-)
-...
-```
-
-This will pull the latest tagged `v1` release (e.g. `v1.18.1` at the time of writing).
-
-## Getting Started
-
-One of the philosophies behind cli is that an API should be playful and full of
-discovery. So a cli app can be as little as one line of code in `main()`.
-
-
-``` go
-package main
-
-import (
- "os"
-
- "github.com/urfave/cli"
-)
-
-func main() {
- cli.NewApp().Run(os.Args)
-}
-```
-
-This app will run and show help text, but is not very useful. Let's give an
-action to execute and some help documentation:
-
-
-``` go
-package main
-
-import (
- "fmt"
- "os"
-
- "github.com/urfave/cli"
-)
-
-func main() {
- app := cli.NewApp()
- app.Name = "boom"
- app.Usage = "make an explosive entrance"
- app.Action = func(c *cli.Context) error {
- fmt.Println("boom! I say!")
- return nil
- }
-
- app.Run(os.Args)
-}
-```
-
-Running this already gives you a ton of functionality, plus support for things
-like subcommands and flags, which are covered below.
-
-## Examples
-
-Being a programmer can be a lonely job. Thankfully by the power of automation
-that is not the case! Let's create a greeter app to fend off our demons of
-loneliness!
-
-Start by creating a directory named `greet`, and within it, add a file,
-`greet.go` with the following code in it:
-
-
-``` go
-package main
-
-import (
- "fmt"
- "os"
-
- "github.com/urfave/cli"
-)
-
-func main() {
- app := cli.NewApp()
- app.Name = "greet"
- app.Usage = "fight the loneliness!"
- app.Action = func(c *cli.Context) error {
- fmt.Println("Hello friend!")
- return nil
- }
-
- app.Run(os.Args)
-}
-```
-
-Install our command to the `$GOPATH/bin` directory:
-
-```
-$ go install
-```
-
-Finally run our new command:
-
-```
-$ greet
-Hello friend!
-```
-
-cli also generates neat help text:
-
-```
-$ greet help
-NAME:
- greet - fight the loneliness!
-
-USAGE:
- greet [global options] command [command options] [arguments...]
-
-VERSION:
- 0.0.0
-
-COMMANDS:
- help, h Shows a list of commands or help for one command
-
-GLOBAL OPTIONS
- --version Shows version information
-```
-
-### Arguments
-
-You can lookup arguments by calling the `Args` function on `cli.Context`, e.g.:
-
-
-``` go
-package main
-
-import (
- "fmt"
- "os"
-
- "github.com/urfave/cli"
-)
-
-func main() {
- app := cli.NewApp()
-
- app.Action = func(c *cli.Context) error {
- fmt.Printf("Hello %q", c.Args().Get(0))
- return nil
- }
-
- app.Run(os.Args)
-}
-```
-
-### Flags
-
-Setting and querying flags is simple.
-
-
-``` go
-package main
-
-import (
- "fmt"
- "os"
-
- "github.com/urfave/cli"
-)
-
-func main() {
- app := cli.NewApp()
-
- app.Flags = []cli.Flag {
- cli.StringFlag{
- Name: "lang",
- Value: "english",
- Usage: "language for the greeting",
- },
- }
-
- app.Action = func(c *cli.Context) error {
- name := "Nefertiti"
- if c.NArg() > 0 {
- name = c.Args().Get(0)
- }
- if c.String("lang") == "spanish" {
- fmt.Println("Hola", name)
- } else {
- fmt.Println("Hello", name)
- }
- return nil
- }
-
- app.Run(os.Args)
-}
-```
-
-You can also set a destination variable for a flag, to which the content will be
-scanned.
-
-
-``` go
-package main
-
-import (
- "os"
- "fmt"
-
- "github.com/urfave/cli"
-)
-
-func main() {
- var language string
-
- app := cli.NewApp()
-
- app.Flags = []cli.Flag {
- cli.StringFlag{
- Name: "lang",
- Value: "english",
- Usage: "language for the greeting",
- Destination: &language,
- },
- }
-
- app.Action = func(c *cli.Context) error {
- name := "someone"
- if c.NArg() > 0 {
- name = c.Args()[0]
- }
- if language == "spanish" {
- fmt.Println("Hola", name)
- } else {
- fmt.Println("Hello", name)
- }
- return nil
- }
-
- app.Run(os.Args)
-}
-```
-
-See full list of flags at http://godoc.org/github.com/urfave/cli
-
-#### Placeholder Values
-
-Sometimes it's useful to specify a flag's value within the usage string itself.
-Such placeholders are indicated with back quotes.
-
-For example this:
-
-
-```go
-package main
-
-import (
- "os"
-
- "github.com/urfave/cli"
-)
-
-func main() {
- app := cli.NewApp()
-
- app.Flags = []cli.Flag{
- cli.StringFlag{
- Name: "config, c",
- Usage: "Load configuration from `FILE`",
- },
- }
-
- app.Run(os.Args)
-}
-```
-
-Will result in help output like:
-
-```
---config FILE, -c FILE Load configuration from FILE
-```
-
-Note that only the first placeholder is used. Subsequent back-quoted words will
-be left as-is.
-
-#### Alternate Names
-
-You can set alternate (or short) names for flags by providing a comma-delimited
-list for the `Name`. e.g.
-
-
-``` go
-package main
-
-import (
- "os"
-
- "github.com/urfave/cli"
-)
-
-func main() {
- app := cli.NewApp()
-
- app.Flags = []cli.Flag {
- cli.StringFlag{
- Name: "lang, l",
- Value: "english",
- Usage: "language for the greeting",
- },
- }
-
- app.Run(os.Args)
-}
-```
-
-That flag can then be set with `--lang spanish` or `-l spanish`. Note that
-giving two different forms of the same flag in the same command invocation is an
-error.
-
-#### Values from the Environment
-
-You can also have the default value set from the environment via `EnvVar`. e.g.
-
-
-``` go
-package main
-
-import (
- "os"
-
- "github.com/urfave/cli"
-)
-
-func main() {
- app := cli.NewApp()
-
- app.Flags = []cli.Flag {
- cli.StringFlag{
- Name: "lang, l",
- Value: "english",
- Usage: "language for the greeting",
- EnvVar: "APP_LANG",
- },
- }
-
- app.Run(os.Args)
-}
-```
-
-The `EnvVar` may also be given as a comma-delimited "cascade", where the first
-environment variable that resolves is used as the default.
-
-
-``` go
-package main
-
-import (
- "os"
-
- "github.com/urfave/cli"
-)
-
-func main() {
- app := cli.NewApp()
-
- app.Flags = []cli.Flag {
- cli.StringFlag{
- Name: "lang, l",
- Value: "english",
- Usage: "language for the greeting",
- EnvVar: "LEGACY_COMPAT_LANG,APP_LANG,LANG",
- },
- }
-
- app.Run(os.Args)
-}
-```
-
-#### Values from alternate input sources (YAML, TOML, and others)
-
-There is a separate package altsrc that adds support for getting flag values
-from other file input sources.
-
-Currently supported input source formats:
-* YAML
-* TOML
-
-In order to get values for a flag from an alternate input source the following
-code would be added to wrap an existing cli.Flag like below:
-
-``` go
- altsrc.NewIntFlag(cli.IntFlag{Name: "test"})
-```
-
-Initialization must also occur for these flags. Below is an example initializing
-getting data from a yaml file below.
-
-``` go
- command.Before = altsrc.InitInputSourceWithContext(command.Flags, NewYamlSourceFromFlagFunc("load"))
-```
-
-The code above will use the "load" string as a flag name to get the file name of
-a yaml file from the cli.Context. It will then use that file name to initialize
-the yaml input source for any flags that are defined on that command. As a note
-the "load" flag used would also have to be defined on the command flags in order
-for this code snipped to work.
-
-Currently only the aboved specified formats are supported but developers can
-add support for other input sources by implementing the
-altsrc.InputSourceContext for their given sources.
-
-Here is a more complete sample of a command using YAML support:
-
-
-``` go
-package notmain
-
-import (
- "fmt"
- "os"
-
- "github.com/urfave/cli"
- "github.com/urfave/cli/altsrc"
-)
-
-func main() {
- app := cli.NewApp()
-
- flags := []cli.Flag{
- altsrc.NewIntFlag(cli.IntFlag{Name: "test"}),
- cli.StringFlag{Name: "load"},
- }
-
- app.Action = func(c *cli.Context) error {
- fmt.Println("yaml ist rad")
- return nil
- }
-
- app.Before = altsrc.InitInputSourceWithContext(flags, altsrc.NewYamlSourceFromFlagFunc("load"))
- app.Flags = flags
-
- app.Run(os.Args)
-}
-```
-
-### Subcommands
-
-Subcommands can be defined for a more git-like command line app.
-
-
-```go
-package main
-
-import (
- "fmt"
- "os"
-
- "github.com/urfave/cli"
-)
-
-func main() {
- app := cli.NewApp()
-
- app.Commands = []cli.Command{
- {
- Name: "add",
- Aliases: []string{"a"},
- Usage: "add a task to the list",
- Action: func(c *cli.Context) error {
- fmt.Println("added task: ", c.Args().First())
- return nil
- },
- },
- {
- Name: "complete",
- Aliases: []string{"c"},
- Usage: "complete a task on the list",
- Action: func(c *cli.Context) error {
- fmt.Println("completed task: ", c.Args().First())
- return nil
- },
- },
- {
- Name: "template",
- Aliases: []string{"t"},
- Usage: "options for task templates",
- Subcommands: []cli.Command{
- {
- Name: "add",
- Usage: "add a new template",
- Action: func(c *cli.Context) error {
- fmt.Println("new task template: ", c.Args().First())
- return nil
- },
- },
- {
- Name: "remove",
- Usage: "remove an existing template",
- Action: func(c *cli.Context) error {
- fmt.Println("removed task template: ", c.Args().First())
- return nil
- },
- },
- },
- },
- }
-
- app.Run(os.Args)
-}
-```
-
-### Subcommands categories
-
-For additional organization in apps that have many subcommands, you can
-associate a category for each command to group them together in the help
-output.
-
-E.g.
-
-```go
-package main
-
-import (
- "os"
-
- "github.com/urfave/cli"
-)
-
-func main() {
- app := cli.NewApp()
-
- app.Commands = []cli.Command{
- {
- Name: "noop",
- },
- {
- Name: "add",
- Category: "template",
- },
- {
- Name: "remove",
- Category: "template",
- },
- }
-
- app.Run(os.Args)
-}
-```
-
-Will include:
-
-```
-COMMANDS:
- noop
-
- Template actions:
- add
- remove
-```
-
-### Exit code
-
-Calling `App.Run` will not automatically call `os.Exit`, which means that by
-default the exit code will "fall through" to being `0`. An explicit exit code
-may be set by returning a non-nil error that fulfills `cli.ExitCoder`, *or* a
-`cli.MultiError` that includes an error that fulfills `cli.ExitCoder`, e.g.:
-
-``` go
-package main
-
-import (
- "os"
-
- "github.com/urfave/cli"
-)
-
-func main() {
- app := cli.NewApp()
- app.Flags = []cli.Flag{
- cli.BoolTFlag{
- Name: "ginger-crouton",
- Usage: "is it in the soup?",
- },
- }
- app.Action = func(ctx *cli.Context) error {
- if !ctx.Bool("ginger-crouton") {
- return cli.NewExitError("it is not in the soup", 86)
- }
- return nil
- }
-
- app.Run(os.Args)
-}
-```
-
-### Bash Completion
-
-You can enable completion commands by setting the `EnableBashCompletion`
-flag on the `App` object. By default, this setting will only auto-complete to
-show an app's subcommands, but you can write your own completion methods for
-the App or its subcommands.
-
-
-``` go
-package main
-
-import (
- "fmt"
- "os"
-
- "github.com/urfave/cli"
-)
-
-func main() {
- tasks := []string{"cook", "clean", "laundry", "eat", "sleep", "code"}
-
- app := cli.NewApp()
- app.EnableBashCompletion = true
- app.Commands = []cli.Command{
- {
- Name: "complete",
- Aliases: []string{"c"},
- Usage: "complete a task on the list",
- Action: func(c *cli.Context) error {
- fmt.Println("completed task: ", c.Args().First())
- return nil
- },
- BashComplete: func(c *cli.Context) {
- // This will complete if no args are passed
- if c.NArg() > 0 {
- return
- }
- for _, t := range tasks {
- fmt.Println(t)
- }
- },
- },
- }
-
- app.Run(os.Args)
-}
-```
-
-#### Enabling
-
-Source the `autocomplete/bash_autocomplete` file in your `.bashrc` file while
-setting the `PROG` variable to the name of your program:
-
-`PROG=myprogram source /.../cli/autocomplete/bash_autocomplete`
-
-#### Distribution
-
-Copy `autocomplete/bash_autocomplete` into `/etc/bash_completion.d/` and rename
-it to the name of the program you wish to add autocomplete support for (or
-automatically install it there if you are distributing a package). Don't forget
-to source the file to make it active in the current shell.
-
-```
-sudo cp src/bash_autocomplete /etc/bash_completion.d/
-source /etc/bash_completion.d/
-```
-
-Alternatively, you can just document that users should source the generic
-`autocomplete/bash_autocomplete` in their bash configuration with `$PROG` set
-to the name of their program (as above).
-
-#### Customization
-
-The default bash completion flag (`--generate-bash-completion`) is defined as
-`cli.BashCompletionFlag`, and may be redefined if desired, e.g.:
-
-
-``` go
-package main
-
-import (
- "os"
-
- "github.com/urfave/cli"
-)
-
-func main() {
- cli.BashCompletionFlag = cli.BoolFlag{
- Name: "compgen",
- Hidden: true,
- }
-
- app := cli.NewApp()
- app.EnableBashCompletion = true
- app.Commands = []cli.Command{
- {
- Name: "wat",
- },
- }
- app.Run(os.Args)
-}
-```
-
-### Generated Help Text
-
-The default help flag (`-h/--help`) is defined as `cli.HelpFlag` and is checked
-by the cli internals in order to print generated help text for the app, command,
-or subcommand, and break execution.
-
-#### Customization
-
-All of the help text generation may be customized, and at multiple levels. The
-templates are exposed as variables `AppHelpTemplate`, `CommandHelpTemplate`, and
-`SubcommandHelpTemplate` which may be reassigned or augmented, and full override
-is possible by assigning a compatible func to the `cli.HelpPrinter` variable,
-e.g.:
-
-
-``` go
-package main
-
-import (
- "fmt"
- "io"
- "os"
-
- "github.com/urfave/cli"
-)
-
-func main() {
- // EXAMPLE: Append to an existing template
- cli.AppHelpTemplate = fmt.Sprintf(`%s
-
-WEBSITE: http://awesometown.example.com
-
-SUPPORT: support@awesometown.example.com
-
-`, cli.AppHelpTemplate)
-
- // EXAMPLE: Override a template
- cli.AppHelpTemplate = `NAME:
- {{.Name}} - {{.Usage}}
-USAGE:
- {{.HelpName}} {{if .VisibleFlags}}[global options]{{end}}{{if .Commands}} command
-[command options]{{end}} {{if
-.ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}
- {{if len .Authors}}
-AUTHOR(S):
- {{range .Authors}}{{ . }}{{end}}
- {{end}}{{if .Commands}}
-COMMANDS:
-{{range .Commands}}{{if not .HideHelp}} {{join .Names ", "}}{{ "\t"
-}}{{.Usage}}{{ "\n" }}{{end}}{{end}}{{end}}{{if .VisibleFlags}}
-GLOBAL OPTIONS:
- {{range .VisibleFlags}}{{.}}
- {{end}}{{end}}{{if .Copyright }}
-COPYRIGHT:
- {{.Copyright}}
- {{end}}{{if .Version}}
-VERSION:
- {{.Version}}
- {{end}}
-`
-
- // EXAMPLE: Replace the `HelpPrinter` func
- cli.HelpPrinter = func(w io.Writer, templ string, data interface{}) {
- fmt.Println("Ha HA. I pwnd the help!!1")
- }
-
- cli.NewApp().Run(os.Args)
-}
-```
-
-The default flag may be customized to something other than `-h/--help` by
-setting `cli.HelpFlag`, e.g.:
-
-
-``` go
-package main
-
-import (
- "os"
-
- "github.com/urfave/cli"
-)
-
-func main() {
- cli.HelpFlag = cli.BoolFlag{
- Name: "halp, haaaaalp",
- Usage: "HALP",
- EnvVar: "SHOW_HALP,HALPPLZ",
- }
-
- cli.NewApp().Run(os.Args)
-}
-```
-
-### Version Flag
-
-The default version flag (`-v/--version`) is defined as `cli.VersionFlag`, which
-is checked by the cli internals in order to print the `App.Version` via
-`cli.VersionPrinter` and break execution.
-
-#### Customization
-
-The default flag may be customized to something other than `-v/--version` by
-setting `cli.VersionFlag`, e.g.:
-
-
-``` go
-package main
-
-import (
- "os"
-
- "github.com/urfave/cli"
-)
-
-func main() {
- cli.VersionFlag = cli.BoolFlag{
- Name: "print-version, V",
- Usage: "print only the version",
- }
-
- app := cli.NewApp()
- app.Name = "partay"
- app.Version = "19.99.0"
- app.Run(os.Args)
-}
-```
-
-Alternatively, the version printer at `cli.VersionPrinter` may be overridden, e.g.:
-
-
-``` go
-package main
-
-import (
- "fmt"
- "os"
-
- "github.com/urfave/cli"
-)
-
-var (
- Revision = "fafafaf"
-)
-
-func main() {
- cli.VersionPrinter = func(c *cli.Context) {
- fmt.Printf("version=%s revision=%s\n", c.App.Version, Revision)
- }
-
- app := cli.NewApp()
- app.Name = "partay"
- app.Version = "19.99.0"
- app.Run(os.Args)
-}
-```
-
-#### Full API Example
-
-**Notice**: This is a contrived (functioning) example meant strictly for API
-demonstration purposes. Use of one's imagination is encouraged.
-
-
-``` go
-package main
-
-import (
- "errors"
- "flag"
- "fmt"
- "io"
- "io/ioutil"
- "os"
- "time"
-
- "github.com/urfave/cli"
-)
-
-func init() {
- cli.AppHelpTemplate += "\nCUSTOMIZED: you bet ur muffins\n"
- cli.CommandHelpTemplate += "\nYMMV\n"
- cli.SubcommandHelpTemplate += "\nor something\n"
-
- cli.HelpFlag = cli.BoolFlag{Name: "halp"}
- cli.BashCompletionFlag = cli.BoolFlag{Name: "compgen", Hidden: true}
- cli.VersionFlag = cli.BoolFlag{Name: "print-version, V"}
-
- cli.HelpPrinter = func(w io.Writer, templ string, data interface{}) {
- fmt.Fprintf(w, "best of luck to you\n")
- }
- cli.VersionPrinter = func(c *cli.Context) {
- fmt.Fprintf(c.App.Writer, "version=%s\n", c.App.Version)
- }
- cli.OsExiter = func(c int) {
- fmt.Fprintf(cli.ErrWriter, "refusing to exit %d\n", c)
- }
- cli.ErrWriter = ioutil.Discard
- cli.FlagStringer = func(fl cli.Flag) string {
- return fmt.Sprintf("\t\t%s", fl.GetName())
- }
-}
-
-type hexWriter struct{}
-
-func (w *hexWriter) Write(p []byte) (int, error) {
- for _, b := range p {
- fmt.Printf("%x", b)
- }
- fmt.Printf("\n")
-
- return len(p), nil
-}
-
-type genericType struct{
- s string
-}
-
-func (g *genericType) Set(value string) error {
- g.s = value
- return nil
-}
-
-func (g *genericType) String() string {
- return g.s
-}
-
-func main() {
- app := cli.NewApp()
- app.Name = "kənˈtrīv"
- app.Version = "19.99.0"
- app.Compiled = time.Now()
- app.Authors = []cli.Author{
- cli.Author{
- Name: "Example Human",
- Email: "human@example.com",
- },
- }
- app.Copyright = "(c) 1999 Serious Enterprise"
- app.HelpName = "contrive"
- app.Usage = "demonstrate available API"
- app.UsageText = "contrive - demonstrating the available API"
- app.ArgsUsage = "[args and such]"
- app.Commands = []cli.Command{
- cli.Command{
- Name: "doo",
- Aliases: []string{"do"},
- Category: "motion",
- Usage: "do the doo",
- UsageText: "doo - does the dooing",
- Description: "no really, there is a lot of dooing to be done",
- ArgsUsage: "[arrgh]",
- Flags: []cli.Flag{
- cli.BoolFlag{Name: "forever, forevvarr"},
- },
- Subcommands: cli.Commands{
- cli.Command{
- Name: "wop",
- Action: wopAction,
- },
- },
- SkipFlagParsing: false,
- HideHelp: false,
- Hidden: false,
- HelpName: "doo!",
- BashComplete: func(c *cli.Context) {
- fmt.Fprintf(c.App.Writer, "--better\n")
- },
- Before: func(c *cli.Context) error {
- fmt.Fprintf(c.App.Writer, "brace for impact\n")
- return nil
- },
- After: func(c *cli.Context) error {
- fmt.Fprintf(c.App.Writer, "did we lose anyone?\n")
- return nil
- },
- Action: func(c *cli.Context) error {
- c.Command.FullName()
- c.Command.HasName("wop")
- c.Command.Names()
- c.Command.VisibleFlags()
- fmt.Fprintf(c.App.Writer, "dodododododoodododddooooododododooo\n")
- if c.Bool("forever") {
- c.Command.Run(c)
- }
- return nil
- },
- OnUsageError: func(c *cli.Context, err error, isSubcommand bool) error {
- fmt.Fprintf(c.App.Writer, "for shame\n")
- return err
- },
- },
- }
- app.Flags = []cli.Flag{
- cli.BoolFlag{Name: "fancy"},
- cli.BoolTFlag{Name: "fancier"},
- cli.DurationFlag{Name: "howlong, H", Value: time.Second * 3},
- cli.Float64Flag{Name: "howmuch"},
- cli.GenericFlag{Name: "wat", Value: &genericType{}},
- cli.Int64Flag{Name: "longdistance"},
- cli.Int64SliceFlag{Name: "intervals"},
- cli.IntFlag{Name: "distance"},
- cli.IntSliceFlag{Name: "times"},
- cli.StringFlag{Name: "dance-move, d"},
- cli.StringSliceFlag{Name: "names, N"},
- cli.UintFlag{Name: "age"},
- cli.Uint64Flag{Name: "bigage"},
- }
- app.EnableBashCompletion = true
- app.HideHelp = false
- app.HideVersion = false
- app.BashComplete = func(c *cli.Context) {
- fmt.Fprintf(c.App.Writer, "lipstick\nkiss\nme\nlipstick\nringo\n")
- }
- app.Before = func(c *cli.Context) error {
- fmt.Fprintf(c.App.Writer, "HEEEERE GOES\n")
- return nil
- }
- app.After = func(c *cli.Context) error {
- fmt.Fprintf(c.App.Writer, "Phew!\n")
- return nil
- }
- app.CommandNotFound = func(c *cli.Context, command string) {
- fmt.Fprintf(c.App.Writer, "Thar be no %q here.\n", command)
- }
- app.OnUsageError = func(c *cli.Context, err error, isSubcommand bool) error {
- if isSubcommand {
- return err
- }
-
- fmt.Fprintf(c.App.Writer, "WRONG: %#v\n", err)
- return nil
- }
- app.Action = func(c *cli.Context) error {
- cli.DefaultAppComplete(c)
- cli.HandleExitCoder(errors.New("not an exit coder, though"))
- cli.ShowAppHelp(c)
- cli.ShowCommandCompletions(c, "nope")
- cli.ShowCommandHelp(c, "also-nope")
- cli.ShowCompletions(c)
- cli.ShowSubcommandHelp(c)
- cli.ShowVersion(c)
-
- categories := c.App.Categories()
- categories.AddCommand("sounds", cli.Command{
- Name: "bloop",
- })
-
- for _, category := range c.App.Categories() {
- fmt.Fprintf(c.App.Writer, "%s\n", category.Name)
- fmt.Fprintf(c.App.Writer, "%#v\n", category.Commands)
- fmt.Fprintf(c.App.Writer, "%#v\n", category.VisibleCommands())
- }
-
- fmt.Printf("%#v\n", c.App.Command("doo"))
- if c.Bool("infinite") {
- c.App.Run([]string{"app", "doo", "wop"})
- }
-
- if c.Bool("forevar") {
- c.App.RunAsSubcommand(c)
- }
- c.App.Setup()
- fmt.Printf("%#v\n", c.App.VisibleCategories())
- fmt.Printf("%#v\n", c.App.VisibleCommands())
- fmt.Printf("%#v\n", c.App.VisibleFlags())
-
- fmt.Printf("%#v\n", c.Args().First())
- if len(c.Args()) > 0 {
- fmt.Printf("%#v\n", c.Args()[1])
- }
- fmt.Printf("%#v\n", c.Args().Present())
- fmt.Printf("%#v\n", c.Args().Tail())
-
- set := flag.NewFlagSet("contrive", 0)
- nc := cli.NewContext(c.App, set, c)
-
- fmt.Printf("%#v\n", nc.Args())
- fmt.Printf("%#v\n", nc.Bool("nope"))
- fmt.Printf("%#v\n", nc.BoolT("nerp"))
- fmt.Printf("%#v\n", nc.Duration("howlong"))
- fmt.Printf("%#v\n", nc.Float64("hay"))
- fmt.Printf("%#v\n", nc.Generic("bloop"))
- fmt.Printf("%#v\n", nc.Int64("bonk"))
- fmt.Printf("%#v\n", nc.Int64Slice("burnks"))
- fmt.Printf("%#v\n", nc.Int("bips"))
- fmt.Printf("%#v\n", nc.IntSlice("blups"))
- fmt.Printf("%#v\n", nc.String("snurt"))
- fmt.Printf("%#v\n", nc.StringSlice("snurkles"))
- fmt.Printf("%#v\n", nc.Uint("flub"))
- fmt.Printf("%#v\n", nc.Uint64("florb"))
- fmt.Printf("%#v\n", nc.GlobalBool("global-nope"))
- fmt.Printf("%#v\n", nc.GlobalBoolT("global-nerp"))
- fmt.Printf("%#v\n", nc.GlobalDuration("global-howlong"))
- fmt.Printf("%#v\n", nc.GlobalFloat64("global-hay"))
- fmt.Printf("%#v\n", nc.GlobalGeneric("global-bloop"))
- fmt.Printf("%#v\n", nc.GlobalInt("global-bips"))
- fmt.Printf("%#v\n", nc.GlobalIntSlice("global-blups"))
- fmt.Printf("%#v\n", nc.GlobalString("global-snurt"))
- fmt.Printf("%#v\n", nc.GlobalStringSlice("global-snurkles"))
-
- fmt.Printf("%#v\n", nc.FlagNames())
- fmt.Printf("%#v\n", nc.GlobalFlagNames())
- fmt.Printf("%#v\n", nc.GlobalIsSet("wat"))
- fmt.Printf("%#v\n", nc.GlobalSet("wat", "nope"))
- fmt.Printf("%#v\n", nc.NArg())
- fmt.Printf("%#v\n", nc.NumFlags())
- fmt.Printf("%#v\n", nc.Parent())
-
- nc.Set("wat", "also-nope")
-
- ec := cli.NewExitError("ohwell", 86)
- fmt.Fprintf(c.App.Writer, "%d", ec.ExitCode())
- fmt.Printf("made it!\n")
- return ec
- }
-
- if os.Getenv("HEXY") != "" {
- app.Writer = &hexWriter{}
- app.ErrWriter = &hexWriter{}
- }
-
- app.Metadata = map[string]interface{}{
- "layers": "many",
- "explicable": false,
- "whatever-values": 19.99,
- }
-
- app.Run(os.Args)
-}
-
-func wopAction(c *cli.Context) error {
- fmt.Fprintf(c.App.Writer, ":wave: over here, eh\n")
- return nil
-}
-```
-
-## Contribution Guidelines
-
-Feel free to put up a pull request to fix a bug or maybe add a feature. I will
-give it a code review and make sure that it does not break backwards
-compatibility. If I or any other collaborators agree that it is in line with
-the vision of the project, we will work with you to get the code into
-a mergeable state and merge it into the master branch.
-
-If you have contributed something significant to the project, we will most
-likely add you as a collaborator. As a collaborator you are given the ability
-to merge others pull requests. It is very important that new code does not
-break existing code, so be careful about what code you do choose to merge.
-
-If you feel like you have contributed to the project but have not yet been
-added as a collaborator, we probably forgot to add you, please open an issue.
diff --git a/vendor/src/github.com/codegangsta/cli/altsrc/altsrc.go b/vendor/src/github.com/codegangsta/cli/altsrc/altsrc.go
deleted file mode 100644
index ac34bf6..0000000
--- a/vendor/src/github.com/codegangsta/cli/altsrc/altsrc.go
+++ /dev/null
@@ -1,3 +0,0 @@
-package altsrc
-
-//go:generate python ../generate-flag-types altsrc -i ../flag-types.json -o flag_generated.go
diff --git a/vendor/src/github.com/codegangsta/cli/altsrc/flag.go b/vendor/src/github.com/codegangsta/cli/altsrc/flag.go
deleted file mode 100644
index ec14e40..0000000
--- a/vendor/src/github.com/codegangsta/cli/altsrc/flag.go
+++ /dev/null
@@ -1,263 +0,0 @@
-package altsrc
-
-import (
- "fmt"
- "os"
- "strconv"
- "strings"
-
- "gopkg.in/urfave/cli.v1"
-)
-
-// FlagInputSourceExtension is an extension interface of cli.Flag that
-// allows a value to be set on the existing parsed flags.
-type FlagInputSourceExtension interface {
- cli.Flag
- ApplyInputSourceValue(context *cli.Context, isc InputSourceContext) error
-}
-
-// ApplyInputSourceValues iterates over all provided flags and
-// executes ApplyInputSourceValue on flags implementing the
-// FlagInputSourceExtension interface to initialize these flags
-// to an alternate input source.
-func ApplyInputSourceValues(context *cli.Context, inputSourceContext InputSourceContext, flags []cli.Flag) error {
- for _, f := range flags {
- inputSourceExtendedFlag, isType := f.(FlagInputSourceExtension)
- if isType {
- err := inputSourceExtendedFlag.ApplyInputSourceValue(context, inputSourceContext)
- if err != nil {
- return err
- }
- }
- }
-
- return nil
-}
-
-// InitInputSource is used to to setup an InputSourceContext on a cli.Command Before method. It will create a new
-// input source based on the func provided. If there is no error it will then apply the new input source to any flags
-// that are supported by the input source
-func InitInputSource(flags []cli.Flag, createInputSource func() (InputSourceContext, error)) cli.BeforeFunc {
- return func(context *cli.Context) error {
- inputSource, err := createInputSource()
- if err != nil {
- return fmt.Errorf("Unable to create input source: inner error: \n'%v'", err.Error())
- }
-
- return ApplyInputSourceValues(context, inputSource, flags)
- }
-}
-
-// InitInputSourceWithContext is used to to setup an InputSourceContext on a cli.Command Before method. It will create a new
-// input source based on the func provided with potentially using existing cli.Context values to initialize itself. If there is
-// no error it will then apply the new input source to any flags that are supported by the input source
-func InitInputSourceWithContext(flags []cli.Flag, createInputSource func(context *cli.Context) (InputSourceContext, error)) cli.BeforeFunc {
- return func(context *cli.Context) error {
- inputSource, err := createInputSource(context)
- if err != nil {
- return fmt.Errorf("Unable to create input source with context: inner error: \n'%v'", err.Error())
- }
-
- return ApplyInputSourceValues(context, inputSource, flags)
- }
-}
-
-// ApplyInputSourceValue applies a generic value to the flagSet if required
-func (f *GenericFlag) ApplyInputSourceValue(context *cli.Context, isc InputSourceContext) error {
- if f.set != nil {
- if !context.IsSet(f.Name) && !isEnvVarSet(f.EnvVar) {
- value, err := isc.Generic(f.GenericFlag.Name)
- if err != nil {
- return err
- }
- if value != nil {
- eachName(f.Name, func(name string) {
- f.set.Set(f.Name, value.String())
- })
- }
- }
- }
-
- return nil
-}
-
-// ApplyInputSourceValue applies a StringSlice value to the flagSet if required
-func (f *StringSliceFlag) ApplyInputSourceValue(context *cli.Context, isc InputSourceContext) error {
- if f.set != nil {
- if !context.IsSet(f.Name) && !isEnvVarSet(f.EnvVar) {
- value, err := isc.StringSlice(f.StringSliceFlag.Name)
- if err != nil {
- return err
- }
- if value != nil {
- var sliceValue cli.StringSlice = value
- eachName(f.Name, func(name string) {
- underlyingFlag := f.set.Lookup(f.Name)
- if underlyingFlag != nil {
- underlyingFlag.Value = &sliceValue
- }
- })
- }
- }
- }
- return nil
-}
-
-// ApplyInputSourceValue applies a IntSlice value if required
-func (f *IntSliceFlag) ApplyInputSourceValue(context *cli.Context, isc InputSourceContext) error {
- if f.set != nil {
- if !context.IsSet(f.Name) && !isEnvVarSet(f.EnvVar) {
- value, err := isc.IntSlice(f.IntSliceFlag.Name)
- if err != nil {
- return err
- }
- if value != nil {
- var sliceValue cli.IntSlice = value
- eachName(f.Name, func(name string) {
- underlyingFlag := f.set.Lookup(f.Name)
- if underlyingFlag != nil {
- underlyingFlag.Value = &sliceValue
- }
- })
- }
- }
- }
- return nil
-}
-
-// ApplyInputSourceValue applies a Bool value to the flagSet if required
-func (f *BoolFlag) ApplyInputSourceValue(context *cli.Context, isc InputSourceContext) error {
- if f.set != nil {
- if !context.IsSet(f.Name) && !isEnvVarSet(f.EnvVar) {
- value, err := isc.Bool(f.BoolFlag.Name)
- if err != nil {
- return err
- }
- if value {
- eachName(f.Name, func(name string) {
- f.set.Set(f.Name, strconv.FormatBool(value))
- })
- }
- }
- }
- return nil
-}
-
-// ApplyInputSourceValue applies a BoolT value to the flagSet if required
-func (f *BoolTFlag) ApplyInputSourceValue(context *cli.Context, isc InputSourceContext) error {
- if f.set != nil {
- if !context.IsSet(f.Name) && !isEnvVarSet(f.EnvVar) {
- value, err := isc.BoolT(f.BoolTFlag.Name)
- if err != nil {
- return err
- }
- if !value {
- eachName(f.Name, func(name string) {
- f.set.Set(f.Name, strconv.FormatBool(value))
- })
- }
- }
- }
- return nil
-}
-
-// ApplyInputSourceValue applies a String value to the flagSet if required
-func (f *StringFlag) ApplyInputSourceValue(context *cli.Context, isc InputSourceContext) error {
- if f.set != nil {
- if !(context.IsSet(f.Name) || isEnvVarSet(f.EnvVar)) {
- value, err := isc.String(f.StringFlag.Name)
- if err != nil {
- return err
- }
- if value != "" {
- eachName(f.Name, func(name string) {
- f.set.Set(f.Name, value)
- })
- }
- }
- }
- return nil
-}
-
-// ApplyInputSourceValue applies a int value to the flagSet if required
-func (f *IntFlag) ApplyInputSourceValue(context *cli.Context, isc InputSourceContext) error {
- if f.set != nil {
- if !(context.IsSet(f.Name) || isEnvVarSet(f.EnvVar)) {
- value, err := isc.Int(f.IntFlag.Name)
- if err != nil {
- return err
- }
- if value > 0 {
- eachName(f.Name, func(name string) {
- f.set.Set(f.Name, strconv.FormatInt(int64(value), 10))
- })
- }
- }
- }
- return nil
-}
-
-// ApplyInputSourceValue applies a Duration value to the flagSet if required
-func (f *DurationFlag) ApplyInputSourceValue(context *cli.Context, isc InputSourceContext) error {
- if f.set != nil {
- if !(context.IsSet(f.Name) || isEnvVarSet(f.EnvVar)) {
- value, err := isc.Duration(f.DurationFlag.Name)
- if err != nil {
- return err
- }
- if value > 0 {
- eachName(f.Name, func(name string) {
- f.set.Set(f.Name, value.String())
- })
- }
- }
- }
- return nil
-}
-
-// ApplyInputSourceValue applies a Float64 value to the flagSet if required
-func (f *Float64Flag) ApplyInputSourceValue(context *cli.Context, isc InputSourceContext) error {
- if f.set != nil {
- if !(context.IsSet(f.Name) || isEnvVarSet(f.EnvVar)) {
- value, err := isc.Float64(f.Float64Flag.Name)
- if err != nil {
- return err
- }
- if value > 0 {
- floatStr := float64ToString(value)
- eachName(f.Name, func(name string) {
- f.set.Set(f.Name, floatStr)
- })
- }
- }
- }
- return nil
-}
-
-func isEnvVarSet(envVars string) bool {
- for _, envVar := range strings.Split(envVars, ",") {
- envVar = strings.TrimSpace(envVar)
- if envVal := os.Getenv(envVar); envVal != "" {
- // TODO: Can't use this for bools as
- // set means that it was true or false based on
- // Bool flag type, should work for other types
- if len(envVal) > 0 {
- return true
- }
- }
- }
-
- return false
-}
-
-func float64ToString(f float64) string {
- return fmt.Sprintf("%v", f)
-}
-
-func eachName(longName string, fn func(string)) {
- parts := strings.Split(longName, ",")
- for _, name := range parts {
- name = strings.Trim(name, " ")
- fn(name)
- }
-}
diff --git a/vendor/src/github.com/codegangsta/cli/altsrc/flag_generated.go b/vendor/src/github.com/codegangsta/cli/altsrc/flag_generated.go
deleted file mode 100644
index b6b96a1..0000000
--- a/vendor/src/github.com/codegangsta/cli/altsrc/flag_generated.go
+++ /dev/null
@@ -1,256 +0,0 @@
-package altsrc
-
-import (
- "flag"
-
- "gopkg.in/urfave/cli.v1"
-)
-
-// WARNING: This file is generated!
-
-// BoolFlag is the flag type that wraps cli.BoolFlag to allow
-// for other values to be specified
-type BoolFlag struct {
- cli.BoolFlag
- set *flag.FlagSet
-}
-
-// NewBoolFlag creates a new BoolFlag
-func NewBoolFlag(fl cli.BoolFlag) *BoolFlag {
- return &BoolFlag{BoolFlag: fl, set: nil}
-}
-
-// Apply saves the flagSet for later usage calls, then calls the
-// wrapped BoolFlag.Apply
-func (f *BoolFlag) Apply(set *flag.FlagSet) {
- f.set = set
- f.BoolFlag.Apply(set)
-}
-
-// BoolTFlag is the flag type that wraps cli.BoolTFlag to allow
-// for other values to be specified
-type BoolTFlag struct {
- cli.BoolTFlag
- set *flag.FlagSet
-}
-
-// NewBoolTFlag creates a new BoolTFlag
-func NewBoolTFlag(fl cli.BoolTFlag) *BoolTFlag {
- return &BoolTFlag{BoolTFlag: fl, set: nil}
-}
-
-// Apply saves the flagSet for later usage calls, then calls the
-// wrapped BoolTFlag.Apply
-func (f *BoolTFlag) Apply(set *flag.FlagSet) {
- f.set = set
- f.BoolTFlag.Apply(set)
-}
-
-// DurationFlag is the flag type that wraps cli.DurationFlag to allow
-// for other values to be specified
-type DurationFlag struct {
- cli.DurationFlag
- set *flag.FlagSet
-}
-
-// NewDurationFlag creates a new DurationFlag
-func NewDurationFlag(fl cli.DurationFlag) *DurationFlag {
- return &DurationFlag{DurationFlag: fl, set: nil}
-}
-
-// Apply saves the flagSet for later usage calls, then calls the
-// wrapped DurationFlag.Apply
-func (f *DurationFlag) Apply(set *flag.FlagSet) {
- f.set = set
- f.DurationFlag.Apply(set)
-}
-
-// Float64Flag is the flag type that wraps cli.Float64Flag to allow
-// for other values to be specified
-type Float64Flag struct {
- cli.Float64Flag
- set *flag.FlagSet
-}
-
-// NewFloat64Flag creates a new Float64Flag
-func NewFloat64Flag(fl cli.Float64Flag) *Float64Flag {
- return &Float64Flag{Float64Flag: fl, set: nil}
-}
-
-// Apply saves the flagSet for later usage calls, then calls the
-// wrapped Float64Flag.Apply
-func (f *Float64Flag) Apply(set *flag.FlagSet) {
- f.set = set
- f.Float64Flag.Apply(set)
-}
-
-// GenericFlag is the flag type that wraps cli.GenericFlag to allow
-// for other values to be specified
-type GenericFlag struct {
- cli.GenericFlag
- set *flag.FlagSet
-}
-
-// NewGenericFlag creates a new GenericFlag
-func NewGenericFlag(fl cli.GenericFlag) *GenericFlag {
- return &GenericFlag{GenericFlag: fl, set: nil}
-}
-
-// Apply saves the flagSet for later usage calls, then calls the
-// wrapped GenericFlag.Apply
-func (f *GenericFlag) Apply(set *flag.FlagSet) {
- f.set = set
- f.GenericFlag.Apply(set)
-}
-
-// Int64Flag is the flag type that wraps cli.Int64Flag to allow
-// for other values to be specified
-type Int64Flag struct {
- cli.Int64Flag
- set *flag.FlagSet
-}
-
-// NewInt64Flag creates a new Int64Flag
-func NewInt64Flag(fl cli.Int64Flag) *Int64Flag {
- return &Int64Flag{Int64Flag: fl, set: nil}
-}
-
-// Apply saves the flagSet for later usage calls, then calls the
-// wrapped Int64Flag.Apply
-func (f *Int64Flag) Apply(set *flag.FlagSet) {
- f.set = set
- f.Int64Flag.Apply(set)
-}
-
-// IntFlag is the flag type that wraps cli.IntFlag to allow
-// for other values to be specified
-type IntFlag struct {
- cli.IntFlag
- set *flag.FlagSet
-}
-
-// NewIntFlag creates a new IntFlag
-func NewIntFlag(fl cli.IntFlag) *IntFlag {
- return &IntFlag{IntFlag: fl, set: nil}
-}
-
-// Apply saves the flagSet for later usage calls, then calls the
-// wrapped IntFlag.Apply
-func (f *IntFlag) Apply(set *flag.FlagSet) {
- f.set = set
- f.IntFlag.Apply(set)
-}
-
-// IntSliceFlag is the flag type that wraps cli.IntSliceFlag to allow
-// for other values to be specified
-type IntSliceFlag struct {
- cli.IntSliceFlag
- set *flag.FlagSet
-}
-
-// NewIntSliceFlag creates a new IntSliceFlag
-func NewIntSliceFlag(fl cli.IntSliceFlag) *IntSliceFlag {
- return &IntSliceFlag{IntSliceFlag: fl, set: nil}
-}
-
-// Apply saves the flagSet for later usage calls, then calls the
-// wrapped IntSliceFlag.Apply
-func (f *IntSliceFlag) Apply(set *flag.FlagSet) {
- f.set = set
- f.IntSliceFlag.Apply(set)
-}
-
-// Int64SliceFlag is the flag type that wraps cli.Int64SliceFlag to allow
-// for other values to be specified
-type Int64SliceFlag struct {
- cli.Int64SliceFlag
- set *flag.FlagSet
-}
-
-// NewInt64SliceFlag creates a new Int64SliceFlag
-func NewInt64SliceFlag(fl cli.Int64SliceFlag) *Int64SliceFlag {
- return &Int64SliceFlag{Int64SliceFlag: fl, set: nil}
-}
-
-// Apply saves the flagSet for later usage calls, then calls the
-// wrapped Int64SliceFlag.Apply
-func (f *Int64SliceFlag) Apply(set *flag.FlagSet) {
- f.set = set
- f.Int64SliceFlag.Apply(set)
-}
-
-// StringFlag is the flag type that wraps cli.StringFlag to allow
-// for other values to be specified
-type StringFlag struct {
- cli.StringFlag
- set *flag.FlagSet
-}
-
-// NewStringFlag creates a new StringFlag
-func NewStringFlag(fl cli.StringFlag) *StringFlag {
- return &StringFlag{StringFlag: fl, set: nil}
-}
-
-// Apply saves the flagSet for later usage calls, then calls the
-// wrapped StringFlag.Apply
-func (f *StringFlag) Apply(set *flag.FlagSet) {
- f.set = set
- f.StringFlag.Apply(set)
-}
-
-// StringSliceFlag is the flag type that wraps cli.StringSliceFlag to allow
-// for other values to be specified
-type StringSliceFlag struct {
- cli.StringSliceFlag
- set *flag.FlagSet
-}
-
-// NewStringSliceFlag creates a new StringSliceFlag
-func NewStringSliceFlag(fl cli.StringSliceFlag) *StringSliceFlag {
- return &StringSliceFlag{StringSliceFlag: fl, set: nil}
-}
-
-// Apply saves the flagSet for later usage calls, then calls the
-// wrapped StringSliceFlag.Apply
-func (f *StringSliceFlag) Apply(set *flag.FlagSet) {
- f.set = set
- f.StringSliceFlag.Apply(set)
-}
-
-// Uint64Flag is the flag type that wraps cli.Uint64Flag to allow
-// for other values to be specified
-type Uint64Flag struct {
- cli.Uint64Flag
- set *flag.FlagSet
-}
-
-// NewUint64Flag creates a new Uint64Flag
-func NewUint64Flag(fl cli.Uint64Flag) *Uint64Flag {
- return &Uint64Flag{Uint64Flag: fl, set: nil}
-}
-
-// Apply saves the flagSet for later usage calls, then calls the
-// wrapped Uint64Flag.Apply
-func (f *Uint64Flag) Apply(set *flag.FlagSet) {
- f.set = set
- f.Uint64Flag.Apply(set)
-}
-
-// UintFlag is the flag type that wraps cli.UintFlag to allow
-// for other values to be specified
-type UintFlag struct {
- cli.UintFlag
- set *flag.FlagSet
-}
-
-// NewUintFlag creates a new UintFlag
-func NewUintFlag(fl cli.UintFlag) *UintFlag {
- return &UintFlag{UintFlag: fl, set: nil}
-}
-
-// Apply saves the flagSet for later usage calls, then calls the
-// wrapped UintFlag.Apply
-func (f *UintFlag) Apply(set *flag.FlagSet) {
- f.set = set
- f.UintFlag.Apply(set)
-}
diff --git a/vendor/src/github.com/codegangsta/cli/altsrc/flag_test.go b/vendor/src/github.com/codegangsta/cli/altsrc/flag_test.go
deleted file mode 100644
index 9e9c96d..0000000
--- a/vendor/src/github.com/codegangsta/cli/altsrc/flag_test.go
+++ /dev/null
@@ -1,336 +0,0 @@
-package altsrc
-
-import (
- "flag"
- "fmt"
- "os"
- "strings"
- "testing"
- "time"
-
- "gopkg.in/urfave/cli.v1"
-)
-
-type testApplyInputSource struct {
- Flag FlagInputSourceExtension
- FlagName string
- FlagSetName string
- Expected string
- ContextValueString string
- ContextValue flag.Value
- EnvVarValue string
- EnvVarName string
- MapValue interface{}
-}
-
-func TestGenericApplyInputSourceValue(t *testing.T) {
- v := &Parser{"abc", "def"}
- c := runTest(t, testApplyInputSource{
- Flag: NewGenericFlag(cli.GenericFlag{Name: "test", Value: &Parser{}}),
- FlagName: "test",
- MapValue: v,
- })
- expect(t, v, c.Generic("test"))
-}
-
-func TestGenericApplyInputSourceMethodContextSet(t *testing.T) {
- p := &Parser{"abc", "def"}
- c := runTest(t, testApplyInputSource{
- Flag: NewGenericFlag(cli.GenericFlag{Name: "test", Value: &Parser{}}),
- FlagName: "test",
- MapValue: &Parser{"efg", "hig"},
- ContextValueString: p.String(),
- })
- expect(t, p, c.Generic("test"))
-}
-
-func TestGenericApplyInputSourceMethodEnvVarSet(t *testing.T) {
- c := runTest(t, testApplyInputSource{
- Flag: NewGenericFlag(cli.GenericFlag{Name: "test", Value: &Parser{}, EnvVar: "TEST"}),
- FlagName: "test",
- MapValue: &Parser{"efg", "hij"},
- EnvVarName: "TEST",
- EnvVarValue: "abc,def",
- })
- expect(t, &Parser{"abc", "def"}, c.Generic("test"))
-}
-
-func TestStringSliceApplyInputSourceValue(t *testing.T) {
- c := runTest(t, testApplyInputSource{
- Flag: NewStringSliceFlag(cli.StringSliceFlag{Name: "test"}),
- FlagName: "test",
- MapValue: []string{"hello", "world"},
- })
- expect(t, c.StringSlice("test"), []string{"hello", "world"})
-}
-
-func TestStringSliceApplyInputSourceMethodContextSet(t *testing.T) {
- c := runTest(t, testApplyInputSource{
- Flag: NewStringSliceFlag(cli.StringSliceFlag{Name: "test"}),
- FlagName: "test",
- MapValue: []string{"hello", "world"},
- ContextValueString: "ohno",
- })
- expect(t, c.StringSlice("test"), []string{"ohno"})
-}
-
-func TestStringSliceApplyInputSourceMethodEnvVarSet(t *testing.T) {
- c := runTest(t, testApplyInputSource{
- Flag: NewStringSliceFlag(cli.StringSliceFlag{Name: "test", EnvVar: "TEST"}),
- FlagName: "test",
- MapValue: []string{"hello", "world"},
- EnvVarName: "TEST",
- EnvVarValue: "oh,no",
- })
- expect(t, c.StringSlice("test"), []string{"oh", "no"})
-}
-
-func TestIntSliceApplyInputSourceValue(t *testing.T) {
- c := runTest(t, testApplyInputSource{
- Flag: NewIntSliceFlag(cli.IntSliceFlag{Name: "test"}),
- FlagName: "test",
- MapValue: []int{1, 2},
- })
- expect(t, c.IntSlice("test"), []int{1, 2})
-}
-
-func TestIntSliceApplyInputSourceMethodContextSet(t *testing.T) {
- c := runTest(t, testApplyInputSource{
- Flag: NewIntSliceFlag(cli.IntSliceFlag{Name: "test"}),
- FlagName: "test",
- MapValue: []int{1, 2},
- ContextValueString: "3",
- })
- expect(t, c.IntSlice("test"), []int{3})
-}
-
-func TestIntSliceApplyInputSourceMethodEnvVarSet(t *testing.T) {
- c := runTest(t, testApplyInputSource{
- Flag: NewIntSliceFlag(cli.IntSliceFlag{Name: "test", EnvVar: "TEST"}),
- FlagName: "test",
- MapValue: []int{1, 2},
- EnvVarName: "TEST",
- EnvVarValue: "3,4",
- })
- expect(t, c.IntSlice("test"), []int{3, 4})
-}
-
-func TestBoolApplyInputSourceMethodSet(t *testing.T) {
- c := runTest(t, testApplyInputSource{
- Flag: NewBoolFlag(cli.BoolFlag{Name: "test"}),
- FlagName: "test",
- MapValue: true,
- })
- expect(t, true, c.Bool("test"))
-}
-
-func TestBoolApplyInputSourceMethodContextSet(t *testing.T) {
- c := runTest(t, testApplyInputSource{
- Flag: NewBoolFlag(cli.BoolFlag{Name: "test"}),
- FlagName: "test",
- MapValue: false,
- ContextValueString: "true",
- })
- expect(t, true, c.Bool("test"))
-}
-
-func TestBoolApplyInputSourceMethodEnvVarSet(t *testing.T) {
- c := runTest(t, testApplyInputSource{
- Flag: NewBoolFlag(cli.BoolFlag{Name: "test", EnvVar: "TEST"}),
- FlagName: "test",
- MapValue: false,
- EnvVarName: "TEST",
- EnvVarValue: "true",
- })
- expect(t, true, c.Bool("test"))
-}
-
-func TestBoolTApplyInputSourceMethodSet(t *testing.T) {
- c := runTest(t, testApplyInputSource{
- Flag: NewBoolTFlag(cli.BoolTFlag{Name: "test"}),
- FlagName: "test",
- MapValue: false,
- })
- expect(t, false, c.BoolT("test"))
-}
-
-func TestBoolTApplyInputSourceMethodContextSet(t *testing.T) {
- c := runTest(t, testApplyInputSource{
- Flag: NewBoolTFlag(cli.BoolTFlag{Name: "test"}),
- FlagName: "test",
- MapValue: true,
- ContextValueString: "false",
- })
- expect(t, false, c.BoolT("test"))
-}
-
-func TestBoolTApplyInputSourceMethodEnvVarSet(t *testing.T) {
- c := runTest(t, testApplyInputSource{
- Flag: NewBoolTFlag(cli.BoolTFlag{Name: "test", EnvVar: "TEST"}),
- FlagName: "test",
- MapValue: true,
- EnvVarName: "TEST",
- EnvVarValue: "false",
- })
- expect(t, false, c.BoolT("test"))
-}
-
-func TestStringApplyInputSourceMethodSet(t *testing.T) {
- c := runTest(t, testApplyInputSource{
- Flag: NewStringFlag(cli.StringFlag{Name: "test"}),
- FlagName: "test",
- MapValue: "hello",
- })
- expect(t, "hello", c.String("test"))
-}
-
-func TestStringApplyInputSourceMethodContextSet(t *testing.T) {
- c := runTest(t, testApplyInputSource{
- Flag: NewStringFlag(cli.StringFlag{Name: "test"}),
- FlagName: "test",
- MapValue: "hello",
- ContextValueString: "goodbye",
- })
- expect(t, "goodbye", c.String("test"))
-}
-
-func TestStringApplyInputSourceMethodEnvVarSet(t *testing.T) {
- c := runTest(t, testApplyInputSource{
- Flag: NewStringFlag(cli.StringFlag{Name: "test", EnvVar: "TEST"}),
- FlagName: "test",
- MapValue: "hello",
- EnvVarName: "TEST",
- EnvVarValue: "goodbye",
- })
- expect(t, "goodbye", c.String("test"))
-}
-
-func TestIntApplyInputSourceMethodSet(t *testing.T) {
- c := runTest(t, testApplyInputSource{
- Flag: NewIntFlag(cli.IntFlag{Name: "test"}),
- FlagName: "test",
- MapValue: 15,
- })
- expect(t, 15, c.Int("test"))
-}
-
-func TestIntApplyInputSourceMethodContextSet(t *testing.T) {
- c := runTest(t, testApplyInputSource{
- Flag: NewIntFlag(cli.IntFlag{Name: "test"}),
- FlagName: "test",
- MapValue: 15,
- ContextValueString: "7",
- })
- expect(t, 7, c.Int("test"))
-}
-
-func TestIntApplyInputSourceMethodEnvVarSet(t *testing.T) {
- c := runTest(t, testApplyInputSource{
- Flag: NewIntFlag(cli.IntFlag{Name: "test", EnvVar: "TEST"}),
- FlagName: "test",
- MapValue: 15,
- EnvVarName: "TEST",
- EnvVarValue: "12",
- })
- expect(t, 12, c.Int("test"))
-}
-
-func TestDurationApplyInputSourceMethodSet(t *testing.T) {
- c := runTest(t, testApplyInputSource{
- Flag: NewDurationFlag(cli.DurationFlag{Name: "test"}),
- FlagName: "test",
- MapValue: time.Duration(30 * time.Second),
- })
- expect(t, time.Duration(30*time.Second), c.Duration("test"))
-}
-
-func TestDurationApplyInputSourceMethodContextSet(t *testing.T) {
- c := runTest(t, testApplyInputSource{
- Flag: NewDurationFlag(cli.DurationFlag{Name: "test"}),
- FlagName: "test",
- MapValue: time.Duration(30 * time.Second),
- ContextValueString: time.Duration(15 * time.Second).String(),
- })
- expect(t, time.Duration(15*time.Second), c.Duration("test"))
-}
-
-func TestDurationApplyInputSourceMethodEnvVarSet(t *testing.T) {
- c := runTest(t, testApplyInputSource{
- Flag: NewDurationFlag(cli.DurationFlag{Name: "test", EnvVar: "TEST"}),
- FlagName: "test",
- MapValue: time.Duration(30 * time.Second),
- EnvVarName: "TEST",
- EnvVarValue: time.Duration(15 * time.Second).String(),
- })
- expect(t, time.Duration(15*time.Second), c.Duration("test"))
-}
-
-func TestFloat64ApplyInputSourceMethodSet(t *testing.T) {
- c := runTest(t, testApplyInputSource{
- Flag: NewFloat64Flag(cli.Float64Flag{Name: "test"}),
- FlagName: "test",
- MapValue: 1.3,
- })
- expect(t, 1.3, c.Float64("test"))
-}
-
-func TestFloat64ApplyInputSourceMethodContextSet(t *testing.T) {
- c := runTest(t, testApplyInputSource{
- Flag: NewFloat64Flag(cli.Float64Flag{Name: "test"}),
- FlagName: "test",
- MapValue: 1.3,
- ContextValueString: fmt.Sprintf("%v", 1.4),
- })
- expect(t, 1.4, c.Float64("test"))
-}
-
-func TestFloat64ApplyInputSourceMethodEnvVarSet(t *testing.T) {
- c := runTest(t, testApplyInputSource{
- Flag: NewFloat64Flag(cli.Float64Flag{Name: "test", EnvVar: "TEST"}),
- FlagName: "test",
- MapValue: 1.3,
- EnvVarName: "TEST",
- EnvVarValue: fmt.Sprintf("%v", 1.4),
- })
- expect(t, 1.4, c.Float64("test"))
-}
-
-func runTest(t *testing.T, test testApplyInputSource) *cli.Context {
- inputSource := &MapInputSource{valueMap: map[interface{}]interface{}{test.FlagName: test.MapValue}}
- set := flag.NewFlagSet(test.FlagSetName, flag.ContinueOnError)
- c := cli.NewContext(nil, set, nil)
- if test.EnvVarName != "" && test.EnvVarValue != "" {
- os.Setenv(test.EnvVarName, test.EnvVarValue)
- defer os.Setenv(test.EnvVarName, "")
- }
-
- test.Flag.Apply(set)
- if test.ContextValue != nil {
- flag := set.Lookup(test.FlagName)
- flag.Value = test.ContextValue
- }
- if test.ContextValueString != "" {
- set.Set(test.FlagName, test.ContextValueString)
- }
- test.Flag.ApplyInputSourceValue(c, inputSource)
-
- return c
-}
-
-type Parser [2]string
-
-func (p *Parser) Set(value string) error {
- parts := strings.Split(value, ",")
- if len(parts) != 2 {
- return fmt.Errorf("invalid format")
- }
-
- (*p)[0] = parts[0]
- (*p)[1] = parts[1]
-
- return nil
-}
-
-func (p *Parser) String() string {
- return fmt.Sprintf("%s,%s", p[0], p[1])
-}
diff --git a/vendor/src/github.com/codegangsta/cli/altsrc/helpers_test.go b/vendor/src/github.com/codegangsta/cli/altsrc/helpers_test.go
deleted file mode 100644
index 3b7f7e9..0000000
--- a/vendor/src/github.com/codegangsta/cli/altsrc/helpers_test.go
+++ /dev/null
@@ -1,18 +0,0 @@
-package altsrc
-
-import (
- "reflect"
- "testing"
-)
-
-func expect(t *testing.T, a interface{}, b interface{}) {
- if !reflect.DeepEqual(b, a) {
- t.Errorf("Expected %#v (type %v) - Got %#v (type %v)", b, reflect.TypeOf(b), a, reflect.TypeOf(a))
- }
-}
-
-func refute(t *testing.T, a interface{}, b interface{}) {
- if a == b {
- t.Errorf("Did not expect %v (type %v) - Got %v (type %v)", b, reflect.TypeOf(b), a, reflect.TypeOf(a))
- }
-}
diff --git a/vendor/src/github.com/codegangsta/cli/altsrc/input_source_context.go b/vendor/src/github.com/codegangsta/cli/altsrc/input_source_context.go
deleted file mode 100644
index 276dcda..0000000
--- a/vendor/src/github.com/codegangsta/cli/altsrc/input_source_context.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package altsrc
-
-import (
- "time"
-
- "gopkg.in/urfave/cli.v1"
-)
-
-// InputSourceContext is an interface used to allow
-// other input sources to be implemented as needed.
-type InputSourceContext interface {
- Int(name string) (int, error)
- Duration(name string) (time.Duration, error)
- Float64(name string) (float64, error)
- String(name string) (string, error)
- StringSlice(name string) ([]string, error)
- IntSlice(name string) ([]int, error)
- Generic(name string) (cli.Generic, error)
- Bool(name string) (bool, error)
- BoolT(name string) (bool, error)
-}
diff --git a/vendor/src/github.com/codegangsta/cli/altsrc/map_input_source.go b/vendor/src/github.com/codegangsta/cli/altsrc/map_input_source.go
deleted file mode 100644
index b720995..0000000
--- a/vendor/src/github.com/codegangsta/cli/altsrc/map_input_source.go
+++ /dev/null
@@ -1,248 +0,0 @@
-package altsrc
-
-import (
- "fmt"
- "reflect"
- "strings"
- "time"
-
- "gopkg.in/urfave/cli.v1"
-)
-
-// MapInputSource implements InputSourceContext to return
-// data from the map that is loaded.
-type MapInputSource struct {
- valueMap map[interface{}]interface{}
-}
-
-// nestedVal checks if the name has '.' delimiters.
-// If so, it tries to traverse the tree by the '.' delimited sections to find
-// a nested value for the key.
-func nestedVal(name string, tree map[interface{}]interface{}) (interface{}, bool) {
- if sections := strings.Split(name, "."); len(sections) > 1 {
- node := tree
- for _, section := range sections[:len(sections)-1] {
- if child, ok := node[section]; !ok {
- return nil, false
- } else {
- if ctype, ok := child.(map[interface{}]interface{}); !ok {
- return nil, false
- } else {
- node = ctype
- }
- }
- }
- if val, ok := node[sections[len(sections)-1]]; ok {
- return val, true
- }
- }
- return nil, false
-}
-
-// Int returns an int from the map if it exists otherwise returns 0
-func (fsm *MapInputSource) Int(name string) (int, error) {
- otherGenericValue, exists := fsm.valueMap[name]
- if exists {
- otherValue, isType := otherGenericValue.(int)
- if !isType {
- return 0, incorrectTypeForFlagError(name, "int", otherGenericValue)
- }
- return otherValue, nil
- }
- nestedGenericValue, exists := nestedVal(name, fsm.valueMap)
- if exists {
- otherValue, isType := nestedGenericValue.(int)
- if !isType {
- return 0, incorrectTypeForFlagError(name, "int", nestedGenericValue)
- }
- return otherValue, nil
- }
-
- return 0, nil
-}
-
-// Duration returns a duration from the map if it exists otherwise returns 0
-func (fsm *MapInputSource) Duration(name string) (time.Duration, error) {
- otherGenericValue, exists := fsm.valueMap[name]
- if exists {
- otherValue, isType := otherGenericValue.(time.Duration)
- if !isType {
- return 0, incorrectTypeForFlagError(name, "duration", otherGenericValue)
- }
- return otherValue, nil
- }
- nestedGenericValue, exists := nestedVal(name, fsm.valueMap)
- if exists {
- otherValue, isType := nestedGenericValue.(time.Duration)
- if !isType {
- return 0, incorrectTypeForFlagError(name, "duration", nestedGenericValue)
- }
- return otherValue, nil
- }
-
- return 0, nil
-}
-
-// Float64 returns an float64 from the map if it exists otherwise returns 0
-func (fsm *MapInputSource) Float64(name string) (float64, error) {
- otherGenericValue, exists := fsm.valueMap[name]
- if exists {
- otherValue, isType := otherGenericValue.(float64)
- if !isType {
- return 0, incorrectTypeForFlagError(name, "float64", otherGenericValue)
- }
- return otherValue, nil
- }
- nestedGenericValue, exists := nestedVal(name, fsm.valueMap)
- if exists {
- otherValue, isType := nestedGenericValue.(float64)
- if !isType {
- return 0, incorrectTypeForFlagError(name, "float64", nestedGenericValue)
- }
- return otherValue, nil
- }
-
- return 0, nil
-}
-
-// String returns a string from the map if it exists otherwise returns an empty string
-func (fsm *MapInputSource) String(name string) (string, error) {
- otherGenericValue, exists := fsm.valueMap[name]
- if exists {
- otherValue, isType := otherGenericValue.(string)
- if !isType {
- return "", incorrectTypeForFlagError(name, "string", otherGenericValue)
- }
- return otherValue, nil
- }
- nestedGenericValue, exists := nestedVal(name, fsm.valueMap)
- if exists {
- otherValue, isType := nestedGenericValue.(string)
- if !isType {
- return "", incorrectTypeForFlagError(name, "string", nestedGenericValue)
- }
- return otherValue, nil
- }
-
- return "", nil
-}
-
-// StringSlice returns an []string from the map if it exists otherwise returns nil
-func (fsm *MapInputSource) StringSlice(name string) ([]string, error) {
- otherGenericValue, exists := fsm.valueMap[name]
- if exists {
- otherValue, isType := otherGenericValue.([]string)
- if !isType {
- return nil, incorrectTypeForFlagError(name, "[]string", otherGenericValue)
- }
- return otherValue, nil
- }
- nestedGenericValue, exists := nestedVal(name, fsm.valueMap)
- if exists {
- otherValue, isType := nestedGenericValue.([]string)
- if !isType {
- return nil, incorrectTypeForFlagError(name, "[]string", nestedGenericValue)
- }
- return otherValue, nil
- }
-
- return nil, nil
-}
-
-// IntSlice returns an []int from the map if it exists otherwise returns nil
-func (fsm *MapInputSource) IntSlice(name string) ([]int, error) {
- otherGenericValue, exists := fsm.valueMap[name]
- if exists {
- otherValue, isType := otherGenericValue.([]int)
- if !isType {
- return nil, incorrectTypeForFlagError(name, "[]int", otherGenericValue)
- }
- return otherValue, nil
- }
- nestedGenericValue, exists := nestedVal(name, fsm.valueMap)
- if exists {
- otherValue, isType := nestedGenericValue.([]int)
- if !isType {
- return nil, incorrectTypeForFlagError(name, "[]int", nestedGenericValue)
- }
- return otherValue, nil
- }
-
- return nil, nil
-}
-
-// Generic returns an cli.Generic from the map if it exists otherwise returns nil
-func (fsm *MapInputSource) Generic(name string) (cli.Generic, error) {
- otherGenericValue, exists := fsm.valueMap[name]
- if exists {
- otherValue, isType := otherGenericValue.(cli.Generic)
- if !isType {
- return nil, incorrectTypeForFlagError(name, "cli.Generic", otherGenericValue)
- }
- return otherValue, nil
- }
- nestedGenericValue, exists := nestedVal(name, fsm.valueMap)
- if exists {
- otherValue, isType := nestedGenericValue.(cli.Generic)
- if !isType {
- return nil, incorrectTypeForFlagError(name, "cli.Generic", nestedGenericValue)
- }
- return otherValue, nil
- }
-
- return nil, nil
-}
-
-// Bool returns an bool from the map otherwise returns false
-func (fsm *MapInputSource) Bool(name string) (bool, error) {
- otherGenericValue, exists := fsm.valueMap[name]
- if exists {
- otherValue, isType := otherGenericValue.(bool)
- if !isType {
- return false, incorrectTypeForFlagError(name, "bool", otherGenericValue)
- }
- return otherValue, nil
- }
- nestedGenericValue, exists := nestedVal(name, fsm.valueMap)
- if exists {
- otherValue, isType := nestedGenericValue.(bool)
- if !isType {
- return false, incorrectTypeForFlagError(name, "bool", nestedGenericValue)
- }
- return otherValue, nil
- }
-
- return false, nil
-}
-
-// BoolT returns an bool from the map otherwise returns true
-func (fsm *MapInputSource) BoolT(name string) (bool, error) {
- otherGenericValue, exists := fsm.valueMap[name]
- if exists {
- otherValue, isType := otherGenericValue.(bool)
- if !isType {
- return true, incorrectTypeForFlagError(name, "bool", otherGenericValue)
- }
- return otherValue, nil
- }
- nestedGenericValue, exists := nestedVal(name, fsm.valueMap)
- if exists {
- otherValue, isType := nestedGenericValue.(bool)
- if !isType {
- return true, incorrectTypeForFlagError(name, "bool", nestedGenericValue)
- }
- return otherValue, nil
- }
-
- return true, nil
-}
-
-func incorrectTypeForFlagError(name, expectedTypeName string, value interface{}) error {
- valueType := reflect.TypeOf(value)
- valueTypeName := ""
- if valueType != nil {
- valueTypeName = valueType.Name()
- }
-
- return fmt.Errorf("Mismatched type for flag '%s'. Expected '%s' but actual is '%s'", name, expectedTypeName, valueTypeName)
-}
diff --git a/vendor/src/github.com/codegangsta/cli/altsrc/toml_command_test.go b/vendor/src/github.com/codegangsta/cli/altsrc/toml_command_test.go
deleted file mode 100644
index a5053d4..0000000
--- a/vendor/src/github.com/codegangsta/cli/altsrc/toml_command_test.go
+++ /dev/null
@@ -1,310 +0,0 @@
-// Disabling building of toml support in cases where golang is 1.0 or 1.1
-// as the encoding library is not implemented or supported.
-
-// +build go1.2
-
-package altsrc
-
-import (
- "flag"
- "io/ioutil"
- "os"
- "testing"
-
- "gopkg.in/urfave/cli.v1"
-)
-
-func TestCommandTomFileTest(t *testing.T) {
- app := cli.NewApp()
- set := flag.NewFlagSet("test", 0)
- ioutil.WriteFile("current.toml", []byte("test = 15"), 0666)
- defer os.Remove("current.toml")
- test := []string{"test-cmd", "--load", "current.toml"}
- set.Parse(test)
-
- c := cli.NewContext(app, set, nil)
-
- command := &cli.Command{
- Name: "test-cmd",
- Aliases: []string{"tc"},
- Usage: "this is for testing",
- Description: "testing",
- Action: func(c *cli.Context) error {
- val := c.Int("test")
- expect(t, val, 15)
- return nil
- },
- Flags: []cli.Flag{
- NewIntFlag(cli.IntFlag{Name: "test"}),
- cli.StringFlag{Name: "load"}},
- }
- command.Before = InitInputSourceWithContext(command.Flags, NewTomlSourceFromFlagFunc("load"))
- err := command.Run(c)
-
- expect(t, err, nil)
-}
-
-func TestCommandTomlFileTestGlobalEnvVarWins(t *testing.T) {
- app := cli.NewApp()
- set := flag.NewFlagSet("test", 0)
- ioutil.WriteFile("current.toml", []byte("test = 15"), 0666)
- defer os.Remove("current.toml")
-
- os.Setenv("THE_TEST", "10")
- defer os.Setenv("THE_TEST", "")
- test := []string{"test-cmd", "--load", "current.toml"}
- set.Parse(test)
-
- c := cli.NewContext(app, set, nil)
-
- command := &cli.Command{
- Name: "test-cmd",
- Aliases: []string{"tc"},
- Usage: "this is for testing",
- Description: "testing",
- Action: func(c *cli.Context) error {
- val := c.Int("test")
- expect(t, val, 10)
- return nil
- },
- Flags: []cli.Flag{
- NewIntFlag(cli.IntFlag{Name: "test", EnvVar: "THE_TEST"}),
- cli.StringFlag{Name: "load"}},
- }
- command.Before = InitInputSourceWithContext(command.Flags, NewTomlSourceFromFlagFunc("load"))
-
- err := command.Run(c)
-
- expect(t, err, nil)
-}
-
-func TestCommandTomlFileTestGlobalEnvVarWinsNested(t *testing.T) {
- app := cli.NewApp()
- set := flag.NewFlagSet("test", 0)
- ioutil.WriteFile("current.toml", []byte("[top]\ntest = 15"), 0666)
- defer os.Remove("current.toml")
-
- os.Setenv("THE_TEST", "10")
- defer os.Setenv("THE_TEST", "")
- test := []string{"test-cmd", "--load", "current.toml"}
- set.Parse(test)
-
- c := cli.NewContext(app, set, nil)
-
- command := &cli.Command{
- Name: "test-cmd",
- Aliases: []string{"tc"},
- Usage: "this is for testing",
- Description: "testing",
- Action: func(c *cli.Context) error {
- val := c.Int("top.test")
- expect(t, val, 10)
- return nil
- },
- Flags: []cli.Flag{
- NewIntFlag(cli.IntFlag{Name: "top.test", EnvVar: "THE_TEST"}),
- cli.StringFlag{Name: "load"}},
- }
- command.Before = InitInputSourceWithContext(command.Flags, NewTomlSourceFromFlagFunc("load"))
-
- err := command.Run(c)
-
- expect(t, err, nil)
-}
-
-func TestCommandTomlFileTestSpecifiedFlagWins(t *testing.T) {
- app := cli.NewApp()
- set := flag.NewFlagSet("test", 0)
- ioutil.WriteFile("current.toml", []byte("test = 15"), 0666)
- defer os.Remove("current.toml")
-
- test := []string{"test-cmd", "--load", "current.toml", "--test", "7"}
- set.Parse(test)
-
- c := cli.NewContext(app, set, nil)
-
- command := &cli.Command{
- Name: "test-cmd",
- Aliases: []string{"tc"},
- Usage: "this is for testing",
- Description: "testing",
- Action: func(c *cli.Context) error {
- val := c.Int("test")
- expect(t, val, 7)
- return nil
- },
- Flags: []cli.Flag{
- NewIntFlag(cli.IntFlag{Name: "test"}),
- cli.StringFlag{Name: "load"}},
- }
- command.Before = InitInputSourceWithContext(command.Flags, NewTomlSourceFromFlagFunc("load"))
-
- err := command.Run(c)
-
- expect(t, err, nil)
-}
-
-func TestCommandTomlFileTestSpecifiedFlagWinsNested(t *testing.T) {
- app := cli.NewApp()
- set := flag.NewFlagSet("test", 0)
- ioutil.WriteFile("current.toml", []byte(`[top]
- test = 15`), 0666)
- defer os.Remove("current.toml")
-
- test := []string{"test-cmd", "--load", "current.toml", "--top.test", "7"}
- set.Parse(test)
-
- c := cli.NewContext(app, set, nil)
-
- command := &cli.Command{
- Name: "test-cmd",
- Aliases: []string{"tc"},
- Usage: "this is for testing",
- Description: "testing",
- Action: func(c *cli.Context) error {
- val := c.Int("top.test")
- expect(t, val, 7)
- return nil
- },
- Flags: []cli.Flag{
- NewIntFlag(cli.IntFlag{Name: "top.test"}),
- cli.StringFlag{Name: "load"}},
- }
- command.Before = InitInputSourceWithContext(command.Flags, NewTomlSourceFromFlagFunc("load"))
-
- err := command.Run(c)
-
- expect(t, err, nil)
-}
-
-func TestCommandTomlFileTestDefaultValueFileWins(t *testing.T) {
- app := cli.NewApp()
- set := flag.NewFlagSet("test", 0)
- ioutil.WriteFile("current.toml", []byte("test = 15"), 0666)
- defer os.Remove("current.toml")
-
- test := []string{"test-cmd", "--load", "current.toml"}
- set.Parse(test)
-
- c := cli.NewContext(app, set, nil)
-
- command := &cli.Command{
- Name: "test-cmd",
- Aliases: []string{"tc"},
- Usage: "this is for testing",
- Description: "testing",
- Action: func(c *cli.Context) error {
- val := c.Int("test")
- expect(t, val, 15)
- return nil
- },
- Flags: []cli.Flag{
- NewIntFlag(cli.IntFlag{Name: "test", Value: 7}),
- cli.StringFlag{Name: "load"}},
- }
- command.Before = InitInputSourceWithContext(command.Flags, NewTomlSourceFromFlagFunc("load"))
-
- err := command.Run(c)
-
- expect(t, err, nil)
-}
-
-func TestCommandTomlFileTestDefaultValueFileWinsNested(t *testing.T) {
- app := cli.NewApp()
- set := flag.NewFlagSet("test", 0)
- ioutil.WriteFile("current.toml", []byte("[top]\ntest = 15"), 0666)
- defer os.Remove("current.toml")
-
- test := []string{"test-cmd", "--load", "current.toml"}
- set.Parse(test)
-
- c := cli.NewContext(app, set, nil)
-
- command := &cli.Command{
- Name: "test-cmd",
- Aliases: []string{"tc"},
- Usage: "this is for testing",
- Description: "testing",
- Action: func(c *cli.Context) error {
- val := c.Int("top.test")
- expect(t, val, 15)
- return nil
- },
- Flags: []cli.Flag{
- NewIntFlag(cli.IntFlag{Name: "top.test", Value: 7}),
- cli.StringFlag{Name: "load"}},
- }
- command.Before = InitInputSourceWithContext(command.Flags, NewTomlSourceFromFlagFunc("load"))
-
- err := command.Run(c)
-
- expect(t, err, nil)
-}
-
-func TestCommandTomlFileFlagHasDefaultGlobalEnvTomlSetGlobalEnvWins(t *testing.T) {
- app := cli.NewApp()
- set := flag.NewFlagSet("test", 0)
- ioutil.WriteFile("current.toml", []byte("test = 15"), 0666)
- defer os.Remove("current.toml")
-
- os.Setenv("THE_TEST", "11")
- defer os.Setenv("THE_TEST", "")
-
- test := []string{"test-cmd", "--load", "current.toml"}
- set.Parse(test)
-
- c := cli.NewContext(app, set, nil)
-
- command := &cli.Command{
- Name: "test-cmd",
- Aliases: []string{"tc"},
- Usage: "this is for testing",
- Description: "testing",
- Action: func(c *cli.Context) error {
- val := c.Int("test")
- expect(t, val, 11)
- return nil
- },
- Flags: []cli.Flag{
- NewIntFlag(cli.IntFlag{Name: "test", Value: 7, EnvVar: "THE_TEST"}),
- cli.StringFlag{Name: "load"}},
- }
- command.Before = InitInputSourceWithContext(command.Flags, NewTomlSourceFromFlagFunc("load"))
- err := command.Run(c)
-
- expect(t, err, nil)
-}
-
-func TestCommandTomlFileFlagHasDefaultGlobalEnvTomlSetGlobalEnvWinsNested(t *testing.T) {
- app := cli.NewApp()
- set := flag.NewFlagSet("test", 0)
- ioutil.WriteFile("current.toml", []byte("[top]\ntest = 15"), 0666)
- defer os.Remove("current.toml")
-
- os.Setenv("THE_TEST", "11")
- defer os.Setenv("THE_TEST", "")
-
- test := []string{"test-cmd", "--load", "current.toml"}
- set.Parse(test)
-
- c := cli.NewContext(app, set, nil)
-
- command := &cli.Command{
- Name: "test-cmd",
- Aliases: []string{"tc"},
- Usage: "this is for testing",
- Description: "testing",
- Action: func(c *cli.Context) error {
- val := c.Int("top.test")
- expect(t, val, 11)
- return nil
- },
- Flags: []cli.Flag{
- NewIntFlag(cli.IntFlag{Name: "top.test", Value: 7, EnvVar: "THE_TEST"}),
- cli.StringFlag{Name: "load"}},
- }
- command.Before = InitInputSourceWithContext(command.Flags, NewTomlSourceFromFlagFunc("load"))
- err := command.Run(c)
-
- expect(t, err, nil)
-}
diff --git a/vendor/src/github.com/codegangsta/cli/altsrc/toml_file_loader.go b/vendor/src/github.com/codegangsta/cli/altsrc/toml_file_loader.go
deleted file mode 100644
index 39c124f..0000000
--- a/vendor/src/github.com/codegangsta/cli/altsrc/toml_file_loader.go
+++ /dev/null
@@ -1,113 +0,0 @@
-// Disabling building of toml support in cases where golang is 1.0 or 1.1
-// as the encoding library is not implemented or supported.
-
-// +build go1.2
-
-package altsrc
-
-import (
- "fmt"
- "reflect"
-
- "github.com/BurntSushi/toml"
- "gopkg.in/urfave/cli.v1"
-)
-
-type tomlMap struct {
- Map map[interface{}]interface{}
-}
-
-func unmarshalMap(i interface{}) (ret map[interface{}]interface{}, err error) {
- ret = make(map[interface{}]interface{})
- m := i.(map[string]interface{})
- for key, val := range m {
- v := reflect.ValueOf(val)
- switch v.Kind() {
- case reflect.Bool:
- ret[key] = val.(bool)
- case reflect.String:
- ret[key] = val.(string)
- case reflect.Int:
- ret[key] = int(val.(int))
- case reflect.Int8:
- ret[key] = int(val.(int8))
- case reflect.Int16:
- ret[key] = int(val.(int16))
- case reflect.Int32:
- ret[key] = int(val.(int32))
- case reflect.Int64:
- ret[key] = int(val.(int64))
- case reflect.Uint:
- ret[key] = int(val.(uint))
- case reflect.Uint8:
- ret[key] = int(val.(uint8))
- case reflect.Uint16:
- ret[key] = int(val.(uint16))
- case reflect.Uint32:
- ret[key] = int(val.(uint32))
- case reflect.Uint64:
- ret[key] = int(val.(uint64))
- case reflect.Float32:
- ret[key] = float64(val.(float32))
- case reflect.Float64:
- ret[key] = float64(val.(float64))
- case reflect.Map:
- if tmp, err := unmarshalMap(val); err == nil {
- ret[key] = tmp
- } else {
- return nil, err
- }
- case reflect.Array:
- fallthrough // [todo] - Support array type
- default:
- return nil, fmt.Errorf("Unsupported: type = %#v", v.Kind())
- }
- }
- return ret, nil
-}
-
-func (self *tomlMap) UnmarshalTOML(i interface{}) error {
- if tmp, err := unmarshalMap(i); err == nil {
- self.Map = tmp
- } else {
- return err
- }
- return nil
-}
-
-type tomlSourceContext struct {
- FilePath string
-}
-
-// NewTomlSourceFromFile creates a new TOML InputSourceContext from a filepath.
-func NewTomlSourceFromFile(file string) (InputSourceContext, error) {
- tsc := &tomlSourceContext{FilePath: file}
- var results tomlMap = tomlMap{}
- if err := readCommandToml(tsc.FilePath, &results); err != nil {
- return nil, fmt.Errorf("Unable to load TOML file '%s': inner error: \n'%v'", tsc.FilePath, err.Error())
- }
- return &MapInputSource{valueMap: results.Map}, nil
-}
-
-// NewTomlSourceFromFlagFunc creates a new TOML InputSourceContext from a provided flag name and source context.
-func NewTomlSourceFromFlagFunc(flagFileName string) func(context *cli.Context) (InputSourceContext, error) {
- return func(context *cli.Context) (InputSourceContext, error) {
- filePath := context.String(flagFileName)
- return NewTomlSourceFromFile(filePath)
- }
-}
-
-func readCommandToml(filePath string, container interface{}) (err error) {
- b, err := loadDataFrom(filePath)
- if err != nil {
- return err
- }
-
- err = toml.Unmarshal(b, container)
- if err != nil {
- return err
- }
-
- err = nil
- return
-}
diff --git a/vendor/src/github.com/codegangsta/cli/altsrc/yaml_command_test.go b/vendor/src/github.com/codegangsta/cli/altsrc/yaml_command_test.go
deleted file mode 100644
index 9d3f431..0000000
--- a/vendor/src/github.com/codegangsta/cli/altsrc/yaml_command_test.go
+++ /dev/null
@@ -1,313 +0,0 @@
-// Disabling building of yaml support in cases where golang is 1.0 or 1.1
-// as the encoding library is not implemented or supported.
-
-// +build go1.2
-
-package altsrc
-
-import (
- "flag"
- "io/ioutil"
- "os"
- "testing"
-
- "gopkg.in/urfave/cli.v1"
-)
-
-func TestCommandYamlFileTest(t *testing.T) {
- app := cli.NewApp()
- set := flag.NewFlagSet("test", 0)
- ioutil.WriteFile("current.yaml", []byte("test: 15"), 0666)
- defer os.Remove("current.yaml")
- test := []string{"test-cmd", "--load", "current.yaml"}
- set.Parse(test)
-
- c := cli.NewContext(app, set, nil)
-
- command := &cli.Command{
- Name: "test-cmd",
- Aliases: []string{"tc"},
- Usage: "this is for testing",
- Description: "testing",
- Action: func(c *cli.Context) error {
- val := c.Int("test")
- expect(t, val, 15)
- return nil
- },
- Flags: []cli.Flag{
- NewIntFlag(cli.IntFlag{Name: "test"}),
- cli.StringFlag{Name: "load"}},
- }
- command.Before = InitInputSourceWithContext(command.Flags, NewYamlSourceFromFlagFunc("load"))
- err := command.Run(c)
-
- expect(t, err, nil)
-}
-
-func TestCommandYamlFileTestGlobalEnvVarWins(t *testing.T) {
- app := cli.NewApp()
- set := flag.NewFlagSet("test", 0)
- ioutil.WriteFile("current.yaml", []byte("test: 15"), 0666)
- defer os.Remove("current.yaml")
-
- os.Setenv("THE_TEST", "10")
- defer os.Setenv("THE_TEST", "")
- test := []string{"test-cmd", "--load", "current.yaml"}
- set.Parse(test)
-
- c := cli.NewContext(app, set, nil)
-
- command := &cli.Command{
- Name: "test-cmd",
- Aliases: []string{"tc"},
- Usage: "this is for testing",
- Description: "testing",
- Action: func(c *cli.Context) error {
- val := c.Int("test")
- expect(t, val, 10)
- return nil
- },
- Flags: []cli.Flag{
- NewIntFlag(cli.IntFlag{Name: "test", EnvVar: "THE_TEST"}),
- cli.StringFlag{Name: "load"}},
- }
- command.Before = InitInputSourceWithContext(command.Flags, NewYamlSourceFromFlagFunc("load"))
-
- err := command.Run(c)
-
- expect(t, err, nil)
-}
-
-func TestCommandYamlFileTestGlobalEnvVarWinsNested(t *testing.T) {
- app := cli.NewApp()
- set := flag.NewFlagSet("test", 0)
- ioutil.WriteFile("current.yaml", []byte(`top:
- test: 15`), 0666)
- defer os.Remove("current.yaml")
-
- os.Setenv("THE_TEST", "10")
- defer os.Setenv("THE_TEST", "")
- test := []string{"test-cmd", "--load", "current.yaml"}
- set.Parse(test)
-
- c := cli.NewContext(app, set, nil)
-
- command := &cli.Command{
- Name: "test-cmd",
- Aliases: []string{"tc"},
- Usage: "this is for testing",
- Description: "testing",
- Action: func(c *cli.Context) error {
- val := c.Int("top.test")
- expect(t, val, 10)
- return nil
- },
- Flags: []cli.Flag{
- NewIntFlag(cli.IntFlag{Name: "top.test", EnvVar: "THE_TEST"}),
- cli.StringFlag{Name: "load"}},
- }
- command.Before = InitInputSourceWithContext(command.Flags, NewYamlSourceFromFlagFunc("load"))
-
- err := command.Run(c)
-
- expect(t, err, nil)
-}
-
-func TestCommandYamlFileTestSpecifiedFlagWins(t *testing.T) {
- app := cli.NewApp()
- set := flag.NewFlagSet("test", 0)
- ioutil.WriteFile("current.yaml", []byte("test: 15"), 0666)
- defer os.Remove("current.yaml")
-
- test := []string{"test-cmd", "--load", "current.yaml", "--test", "7"}
- set.Parse(test)
-
- c := cli.NewContext(app, set, nil)
-
- command := &cli.Command{
- Name: "test-cmd",
- Aliases: []string{"tc"},
- Usage: "this is for testing",
- Description: "testing",
- Action: func(c *cli.Context) error {
- val := c.Int("test")
- expect(t, val, 7)
- return nil
- },
- Flags: []cli.Flag{
- NewIntFlag(cli.IntFlag{Name: "test"}),
- cli.StringFlag{Name: "load"}},
- }
- command.Before = InitInputSourceWithContext(command.Flags, NewYamlSourceFromFlagFunc("load"))
-
- err := command.Run(c)
-
- expect(t, err, nil)
-}
-
-func TestCommandYamlFileTestSpecifiedFlagWinsNested(t *testing.T) {
- app := cli.NewApp()
- set := flag.NewFlagSet("test", 0)
- ioutil.WriteFile("current.yaml", []byte(`top:
- test: 15`), 0666)
- defer os.Remove("current.yaml")
-
- test := []string{"test-cmd", "--load", "current.yaml", "--top.test", "7"}
- set.Parse(test)
-
- c := cli.NewContext(app, set, nil)
-
- command := &cli.Command{
- Name: "test-cmd",
- Aliases: []string{"tc"},
- Usage: "this is for testing",
- Description: "testing",
- Action: func(c *cli.Context) error {
- val := c.Int("top.test")
- expect(t, val, 7)
- return nil
- },
- Flags: []cli.Flag{
- NewIntFlag(cli.IntFlag{Name: "top.test"}),
- cli.StringFlag{Name: "load"}},
- }
- command.Before = InitInputSourceWithContext(command.Flags, NewYamlSourceFromFlagFunc("load"))
-
- err := command.Run(c)
-
- expect(t, err, nil)
-}
-
-func TestCommandYamlFileTestDefaultValueFileWins(t *testing.T) {
- app := cli.NewApp()
- set := flag.NewFlagSet("test", 0)
- ioutil.WriteFile("current.yaml", []byte("test: 15"), 0666)
- defer os.Remove("current.yaml")
-
- test := []string{"test-cmd", "--load", "current.yaml"}
- set.Parse(test)
-
- c := cli.NewContext(app, set, nil)
-
- command := &cli.Command{
- Name: "test-cmd",
- Aliases: []string{"tc"},
- Usage: "this is for testing",
- Description: "testing",
- Action: func(c *cli.Context) error {
- val := c.Int("test")
- expect(t, val, 15)
- return nil
- },
- Flags: []cli.Flag{
- NewIntFlag(cli.IntFlag{Name: "test", Value: 7}),
- cli.StringFlag{Name: "load"}},
- }
- command.Before = InitInputSourceWithContext(command.Flags, NewYamlSourceFromFlagFunc("load"))
-
- err := command.Run(c)
-
- expect(t, err, nil)
-}
-
-func TestCommandYamlFileTestDefaultValueFileWinsNested(t *testing.T) {
- app := cli.NewApp()
- set := flag.NewFlagSet("test", 0)
- ioutil.WriteFile("current.yaml", []byte(`top:
- test: 15`), 0666)
- defer os.Remove("current.yaml")
-
- test := []string{"test-cmd", "--load", "current.yaml"}
- set.Parse(test)
-
- c := cli.NewContext(app, set, nil)
-
- command := &cli.Command{
- Name: "test-cmd",
- Aliases: []string{"tc"},
- Usage: "this is for testing",
- Description: "testing",
- Action: func(c *cli.Context) error {
- val := c.Int("top.test")
- expect(t, val, 15)
- return nil
- },
- Flags: []cli.Flag{
- NewIntFlag(cli.IntFlag{Name: "top.test", Value: 7}),
- cli.StringFlag{Name: "load"}},
- }
- command.Before = InitInputSourceWithContext(command.Flags, NewYamlSourceFromFlagFunc("load"))
-
- err := command.Run(c)
-
- expect(t, err, nil)
-}
-
-func TestCommandYamlFileFlagHasDefaultGlobalEnvYamlSetGlobalEnvWins(t *testing.T) {
- app := cli.NewApp()
- set := flag.NewFlagSet("test", 0)
- ioutil.WriteFile("current.yaml", []byte("test: 15"), 0666)
- defer os.Remove("current.yaml")
-
- os.Setenv("THE_TEST", "11")
- defer os.Setenv("THE_TEST", "")
-
- test := []string{"test-cmd", "--load", "current.yaml"}
- set.Parse(test)
-
- c := cli.NewContext(app, set, nil)
-
- command := &cli.Command{
- Name: "test-cmd",
- Aliases: []string{"tc"},
- Usage: "this is for testing",
- Description: "testing",
- Action: func(c *cli.Context) error {
- val := c.Int("test")
- expect(t, val, 11)
- return nil
- },
- Flags: []cli.Flag{
- NewIntFlag(cli.IntFlag{Name: "test", Value: 7, EnvVar: "THE_TEST"}),
- cli.StringFlag{Name: "load"}},
- }
- command.Before = InitInputSourceWithContext(command.Flags, NewYamlSourceFromFlagFunc("load"))
- err := command.Run(c)
-
- expect(t, err, nil)
-}
-
-func TestCommandYamlFileFlagHasDefaultGlobalEnvYamlSetGlobalEnvWinsNested(t *testing.T) {
- app := cli.NewApp()
- set := flag.NewFlagSet("test", 0)
- ioutil.WriteFile("current.yaml", []byte(`top:
- test: 15`), 0666)
- defer os.Remove("current.yaml")
-
- os.Setenv("THE_TEST", "11")
- defer os.Setenv("THE_TEST", "")
-
- test := []string{"test-cmd", "--load", "current.yaml"}
- set.Parse(test)
-
- c := cli.NewContext(app, set, nil)
-
- command := &cli.Command{
- Name: "test-cmd",
- Aliases: []string{"tc"},
- Usage: "this is for testing",
- Description: "testing",
- Action: func(c *cli.Context) error {
- val := c.Int("top.test")
- expect(t, val, 11)
- return nil
- },
- Flags: []cli.Flag{
- NewIntFlag(cli.IntFlag{Name: "top.test", Value: 7, EnvVar: "THE_TEST"}),
- cli.StringFlag{Name: "load"}},
- }
- command.Before = InitInputSourceWithContext(command.Flags, NewYamlSourceFromFlagFunc("load"))
- err := command.Run(c)
-
- expect(t, err, nil)
-}
diff --git a/vendor/src/github.com/codegangsta/cli/altsrc/yaml_file_loader.go b/vendor/src/github.com/codegangsta/cli/altsrc/yaml_file_loader.go
deleted file mode 100644
index 335356f..0000000
--- a/vendor/src/github.com/codegangsta/cli/altsrc/yaml_file_loader.go
+++ /dev/null
@@ -1,84 +0,0 @@
-// Disabling building of yaml support in cases where golang is 1.0 or 1.1
-// as the encoding library is not implemented or supported.
-
-// +build go1.2
-
-package altsrc
-
-import (
- "fmt"
- "io/ioutil"
- "net/http"
- "net/url"
- "os"
-
- "gopkg.in/urfave/cli.v1"
-
- "gopkg.in/yaml.v2"
-)
-
-type yamlSourceContext struct {
- FilePath string
-}
-
-// NewYamlSourceFromFile creates a new Yaml InputSourceContext from a filepath.
-func NewYamlSourceFromFile(file string) (InputSourceContext, error) {
- ysc := &yamlSourceContext{FilePath: file}
- var results map[interface{}]interface{}
- err := readCommandYaml(ysc.FilePath, &results)
- if err != nil {
- return nil, fmt.Errorf("Unable to load Yaml file '%s': inner error: \n'%v'", ysc.FilePath, err.Error())
- }
-
- return &MapInputSource{valueMap: results}, nil
-}
-
-// NewYamlSourceFromFlagFunc creates a new Yaml InputSourceContext from a provided flag name and source context.
-func NewYamlSourceFromFlagFunc(flagFileName string) func(context *cli.Context) (InputSourceContext, error) {
- return func(context *cli.Context) (InputSourceContext, error) {
- filePath := context.String(flagFileName)
- return NewYamlSourceFromFile(filePath)
- }
-}
-
-func readCommandYaml(filePath string, container interface{}) (err error) {
- b, err := loadDataFrom(filePath)
- if err != nil {
- return err
- }
-
- err = yaml.Unmarshal(b, container)
- if err != nil {
- return err
- }
-
- err = nil
- return
-}
-
-func loadDataFrom(filePath string) ([]byte, error) {
- u, err := url.Parse(filePath)
- if err != nil {
- return nil, err
- }
-
- if u.Host != "" { // i have a host, now do i support the scheme?
- switch u.Scheme {
- case "http", "https":
- res, err := http.Get(filePath)
- if err != nil {
- return nil, err
- }
- return ioutil.ReadAll(res.Body)
- default:
- return nil, fmt.Errorf("scheme of %s is unsupported", filePath)
- }
- } else if u.Path != "" { // i dont have a host, but I have a path. I am a local file.
- if _, notFoundFileErr := os.Stat(filePath); notFoundFileErr != nil {
- return nil, fmt.Errorf("Cannot read from file: '%s' because it does not exist.", filePath)
- }
- return ioutil.ReadFile(filePath)
- } else {
- return nil, fmt.Errorf("unable to determine how to load from path %s", filePath)
- }
-}
diff --git a/vendor/src/github.com/codegangsta/cli/app.go b/vendor/src/github.com/codegangsta/cli/app.go
deleted file mode 100644
index b9adf46..0000000
--- a/vendor/src/github.com/codegangsta/cli/app.go
+++ /dev/null
@@ -1,502 +0,0 @@
-package cli
-
-import (
- "fmt"
- "io"
- "io/ioutil"
- "os"
- "path/filepath"
- "reflect"
- "sort"
- "strings"
- "time"
-)
-
-var (
- changeLogURL = "https://github.com/urfave/cli/blob/master/CHANGELOG.md"
- appActionDeprecationURL = fmt.Sprintf("%s#deprecated-cli-app-action-signature", changeLogURL)
- runAndExitOnErrorDeprecationURL = fmt.Sprintf("%s#deprecated-cli-app-runandexitonerror", changeLogURL)
-
- contactSysadmin = "This is an error in the application. Please contact the distributor of this application if this is not you."
-
- errNonFuncAction = NewExitError("ERROR invalid Action type. "+
- fmt.Sprintf("Must be a func of type `cli.ActionFunc`. %s", contactSysadmin)+
- fmt.Sprintf("See %s", appActionDeprecationURL), 2)
- errInvalidActionSignature = NewExitError("ERROR invalid Action signature. "+
- fmt.Sprintf("Must be `cli.ActionFunc`. %s", contactSysadmin)+
- fmt.Sprintf("See %s", appActionDeprecationURL), 2)
-)
-
-// App is the main structure of a cli application. It is recommended that
-// an app be created with the cli.NewApp() function
-type App struct {
- // The name of the program. Defaults to path.Base(os.Args[0])
- Name string
- // Full name of command for help, defaults to Name
- HelpName string
- // Description of the program.
- Usage string
- // Text to override the USAGE section of help
- UsageText string
- // Description of the program argument format.
- ArgsUsage string
- // Version of the program
- Version string
- // List of commands to execute
- Commands []Command
- // List of flags to parse
- Flags []Flag
- // Boolean to enable bash completion commands
- EnableBashCompletion bool
- // Boolean to hide built-in help command
- HideHelp bool
- // Boolean to hide built-in version flag and the VERSION section of help
- HideVersion bool
- // Populate on app startup, only gettable through method Categories()
- categories CommandCategories
- // An action to execute when the bash-completion flag is set
- BashComplete BashCompleteFunc
- // An action to execute before any subcommands are run, but after the context is ready
- // If a non-nil error is returned, no subcommands are run
- Before BeforeFunc
- // An action to execute after any subcommands are run, but after the subcommand has finished
- // It is run even if Action() panics
- After AfterFunc
-
- // The action to execute when no subcommands are specified
- // Expects a `cli.ActionFunc` but will accept the *deprecated* signature of `func(*cli.Context) {}`
- // *Note*: support for the deprecated `Action` signature will be removed in a future version
- Action interface{}
-
- // Execute this function if the proper command cannot be found
- CommandNotFound CommandNotFoundFunc
- // Execute this function if an usage error occurs
- OnUsageError OnUsageErrorFunc
- // Compilation date
- Compiled time.Time
- // List of all authors who contributed
- Authors []Author
- // Copyright of the binary if any
- Copyright string
- // Name of Author (Note: Use App.Authors, this is deprecated)
- Author string
- // Email of Author (Note: Use App.Authors, this is deprecated)
- Email string
- // Writer writer to write output to
- Writer io.Writer
- // ErrWriter writes error output
- ErrWriter io.Writer
- // Other custom info
- Metadata map[string]interface{}
-
- didSetup bool
-}
-
-// Tries to find out when this binary was compiled.
-// Returns the current time if it fails to find it.
-func compileTime() time.Time {
- info, err := os.Stat(os.Args[0])
- if err != nil {
- return time.Now()
- }
- return info.ModTime()
-}
-
-// NewApp creates a new cli Application with some reasonable defaults for Name,
-// Usage, Version and Action.
-func NewApp() *App {
- return &App{
- Name: filepath.Base(os.Args[0]),
- HelpName: filepath.Base(os.Args[0]),
- Usage: "A new cli application",
- UsageText: "",
- Version: "0.0.0",
- BashComplete: DefaultAppComplete,
- Action: helpCommand.Action,
- Compiled: compileTime(),
- Writer: os.Stdout,
- }
-}
-
-// Setup runs initialization code to ensure all data structures are ready for
-// `Run` or inspection prior to `Run`. It is internally called by `Run`, but
-// will return early if setup has already happened.
-func (a *App) Setup() {
- if a.didSetup {
- return
- }
-
- a.didSetup = true
-
- if a.Author != "" || a.Email != "" {
- a.Authors = append(a.Authors, Author{Name: a.Author, Email: a.Email})
- }
-
- newCmds := []Command{}
- for _, c := range a.Commands {
- if c.HelpName == "" {
- c.HelpName = fmt.Sprintf("%s %s", a.HelpName, c.Name)
- }
- newCmds = append(newCmds, c)
- }
- a.Commands = newCmds
-
- if a.Command(helpCommand.Name) == nil && !a.HideHelp {
- a.Commands = append(a.Commands, helpCommand)
- if (HelpFlag != BoolFlag{}) {
- a.appendFlag(HelpFlag)
- }
- }
-
- if a.EnableBashCompletion {
- a.appendFlag(BashCompletionFlag)
- }
-
- if !a.HideVersion {
- a.appendFlag(VersionFlag)
- }
-
- a.categories = CommandCategories{}
- for _, command := range a.Commands {
- a.categories = a.categories.AddCommand(command.Category, command)
- }
- sort.Sort(a.categories)
-
- if a.Metadata == nil {
- a.Metadata = make(map[string]interface{})
- }
-}
-
-// Run is the entry point to the cli app. Parses the arguments slice and routes
-// to the proper flag/args combination
-func (a *App) Run(arguments []string) (err error) {
- a.Setup()
-
- // parse flags
- set := flagSet(a.Name, a.Flags)
- set.SetOutput(ioutil.Discard)
- err = set.Parse(arguments[1:])
- nerr := normalizeFlags(a.Flags, set)
- context := NewContext(a, set, nil)
- if nerr != nil {
- fmt.Fprintln(a.Writer, nerr)
- ShowAppHelp(context)
- return nerr
- }
-
- if checkCompletions(context) {
- return nil
- }
-
- if err != nil {
- if a.OnUsageError != nil {
- err := a.OnUsageError(context, err, false)
- HandleExitCoder(err)
- return err
- }
- fmt.Fprintf(a.Writer, "%s\n\n", "Incorrect Usage.")
- ShowAppHelp(context)
- return err
- }
-
- if !a.HideHelp && checkHelp(context) {
- ShowAppHelp(context)
- return nil
- }
-
- if !a.HideVersion && checkVersion(context) {
- ShowVersion(context)
- return nil
- }
-
- if a.After != nil {
- defer func() {
- if afterErr := a.After(context); afterErr != nil {
- if err != nil {
- err = NewMultiError(err, afterErr)
- } else {
- err = afterErr
- }
- }
- }()
- }
-
- if a.Before != nil {
- beforeErr := a.Before(context)
- if beforeErr != nil {
- fmt.Fprintf(a.Writer, "%v\n\n", beforeErr)
- ShowAppHelp(context)
- HandleExitCoder(beforeErr)
- err = beforeErr
- return err
- }
- }
-
- args := context.Args()
- if args.Present() {
- name := args.First()
- c := a.Command(name)
- if c != nil {
- return c.Run(context)
- }
- }
-
- // Run default Action
- err = HandleAction(a.Action, context)
-
- HandleExitCoder(err)
- return err
-}
-
-// RunAndExitOnError calls .Run() and exits non-zero if an error was returned
-//
-// Deprecated: instead you should return an error that fulfills cli.ExitCoder
-// to cli.App.Run. This will cause the application to exit with the given eror
-// code in the cli.ExitCoder
-func (a *App) RunAndExitOnError() {
- if err := a.Run(os.Args); err != nil {
- fmt.Fprintln(a.errWriter(), err)
- OsExiter(1)
- }
-}
-
-// RunAsSubcommand invokes the subcommand given the context, parses ctx.Args() to
-// generate command-specific flags
-func (a *App) RunAsSubcommand(ctx *Context) (err error) {
- // append help to commands
- if len(a.Commands) > 0 {
- if a.Command(helpCommand.Name) == nil && !a.HideHelp {
- a.Commands = append(a.Commands, helpCommand)
- if (HelpFlag != BoolFlag{}) {
- a.appendFlag(HelpFlag)
- }
- }
- }
-
- newCmds := []Command{}
- for _, c := range a.Commands {
- if c.HelpName == "" {
- c.HelpName = fmt.Sprintf("%s %s", a.HelpName, c.Name)
- }
- newCmds = append(newCmds, c)
- }
- a.Commands = newCmds
-
- // append flags
- if a.EnableBashCompletion {
- a.appendFlag(BashCompletionFlag)
- }
-
- // parse flags
- set := flagSet(a.Name, a.Flags)
- set.SetOutput(ioutil.Discard)
- err = set.Parse(ctx.Args().Tail())
- nerr := normalizeFlags(a.Flags, set)
- context := NewContext(a, set, ctx)
-
- if nerr != nil {
- fmt.Fprintln(a.Writer, nerr)
- fmt.Fprintln(a.Writer)
- if len(a.Commands) > 0 {
- ShowSubcommandHelp(context)
- } else {
- ShowCommandHelp(ctx, context.Args().First())
- }
- return nerr
- }
-
- if checkCompletions(context) {
- return nil
- }
-
- if err != nil {
- if a.OnUsageError != nil {
- err = a.OnUsageError(context, err, true)
- HandleExitCoder(err)
- return err
- }
- fmt.Fprintf(a.Writer, "%s\n\n", "Incorrect Usage.")
- ShowSubcommandHelp(context)
- return err
- }
-
- if len(a.Commands) > 0 {
- if checkSubcommandHelp(context) {
- return nil
- }
- } else {
- if checkCommandHelp(ctx, context.Args().First()) {
- return nil
- }
- }
-
- if a.After != nil {
- defer func() {
- afterErr := a.After(context)
- if afterErr != nil {
- HandleExitCoder(err)
- if err != nil {
- err = NewMultiError(err, afterErr)
- } else {
- err = afterErr
- }
- }
- }()
- }
-
- if a.Before != nil {
- beforeErr := a.Before(context)
- if beforeErr != nil {
- HandleExitCoder(beforeErr)
- err = beforeErr
- return err
- }
- }
-
- args := context.Args()
- if args.Present() {
- name := args.First()
- c := a.Command(name)
- if c != nil {
- return c.Run(context)
- }
- }
-
- // Run default Action
- err = HandleAction(a.Action, context)
-
- HandleExitCoder(err)
- return err
-}
-
-// Command returns the named command on App. Returns nil if the command does not exist
-func (a *App) Command(name string) *Command {
- for _, c := range a.Commands {
- if c.HasName(name) {
- return &c
- }
- }
-
- return nil
-}
-
-// Categories returns a slice containing all the categories with the commands they contain
-func (a *App) Categories() CommandCategories {
- return a.categories
-}
-
-// VisibleCategories returns a slice of categories and commands that are
-// Hidden=false
-func (a *App) VisibleCategories() []*CommandCategory {
- ret := []*CommandCategory{}
- for _, category := range a.categories {
- if visible := func() *CommandCategory {
- for _, command := range category.Commands {
- if !command.Hidden {
- return category
- }
- }
- return nil
- }(); visible != nil {
- ret = append(ret, visible)
- }
- }
- return ret
-}
-
-// VisibleCommands returns a slice of the Commands with Hidden=false
-func (a *App) VisibleCommands() []Command {
- ret := []Command{}
- for _, command := range a.Commands {
- if !command.Hidden {
- ret = append(ret, command)
- }
- }
- return ret
-}
-
-// VisibleFlags returns a slice of the Flags with Hidden=false
-func (a *App) VisibleFlags() []Flag {
- return visibleFlags(a.Flags)
-}
-
-func (a *App) hasFlag(flag Flag) bool {
- for _, f := range a.Flags {
- if flag == f {
- return true
- }
- }
-
- return false
-}
-
-func (a *App) errWriter() io.Writer {
-
- // When the app ErrWriter is nil use the package level one.
- if a.ErrWriter == nil {
- return ErrWriter
- }
-
- return a.ErrWriter
-}
-
-func (a *App) appendFlag(flag Flag) {
- if !a.hasFlag(flag) {
- a.Flags = append(a.Flags, flag)
- }
-}
-
-// Author represents someone who has contributed to a cli project.
-type Author struct {
- Name string // The Authors name
- Email string // The Authors email
-}
-
-// String makes Author comply to the Stringer interface, to allow an easy print in the templating process
-func (a Author) String() string {
- e := ""
- if a.Email != "" {
- e = "<" + a.Email + "> "
- }
-
- return fmt.Sprintf("%v %v", a.Name, e)
-}
-
-// HandleAction uses ✧✧✧reflection✧✧✧ to figure out if the given Action is an
-// ActionFunc, a func with the legacy signature for Action, or some other
-// invalid thing. If it's an ActionFunc or a func with the legacy signature for
-// Action, the func is run!
-func HandleAction(action interface{}, context *Context) (err error) {
- defer func() {
- if r := recover(); r != nil {
- // Try to detect a known reflection error from *this scope*, rather than
- // swallowing all panics that may happen when calling an Action func.
- s := fmt.Sprintf("%v", r)
- if strings.HasPrefix(s, "reflect: ") && strings.Contains(s, "too many input arguments") {
- err = NewExitError(fmt.Sprintf("ERROR unknown Action error: %v.", r), 2)
- } else {
- panic(r)
- }
- }
- }()
-
- if reflect.TypeOf(action).Kind() != reflect.Func {
- return errNonFuncAction
- }
-
- vals := reflect.ValueOf(action).Call([]reflect.Value{reflect.ValueOf(context)})
-
- if len(vals) == 0 {
- return nil
- }
-
- if len(vals) > 1 {
- return errInvalidActionSignature
- }
-
- if retErr, ok := vals[0].Interface().(error); vals[0].IsValid() && ok {
- return retErr
- }
-
- return err
-}
diff --git a/vendor/src/github.com/codegangsta/cli/app_test.go b/vendor/src/github.com/codegangsta/cli/app_test.go
deleted file mode 100644
index 23c8aa6..0000000
--- a/vendor/src/github.com/codegangsta/cli/app_test.go
+++ /dev/null
@@ -1,1485 +0,0 @@
-package cli
-
-import (
- "bytes"
- "errors"
- "flag"
- "fmt"
- "io"
- "io/ioutil"
- "os"
- "reflect"
- "strings"
- "testing"
-)
-
-var (
- lastExitCode = 0
- fakeOsExiter = func(rc int) {
- lastExitCode = rc
- }
- fakeErrWriter = &bytes.Buffer{}
-)
-
-func init() {
- OsExiter = fakeOsExiter
- ErrWriter = fakeErrWriter
-}
-
-type opCounts struct {
- Total, BashComplete, OnUsageError, Before, CommandNotFound, Action, After, SubCommand int
-}
-
-func ExampleApp_Run() {
- // set args for examples sake
- os.Args = []string{"greet", "--name", "Jeremy"}
-
- app := NewApp()
- app.Name = "greet"
- app.Flags = []Flag{
- StringFlag{Name: "name", Value: "bob", Usage: "a name to say"},
- }
- app.Action = func(c *Context) error {
- fmt.Printf("Hello %v\n", c.String("name"))
- return nil
- }
- app.UsageText = "app [first_arg] [second_arg]"
- app.Author = "Harrison"
- app.Email = "harrison@lolwut.com"
- app.Authors = []Author{{Name: "Oliver Allen", Email: "oliver@toyshop.com"}}
- app.Run(os.Args)
- // Output:
- // Hello Jeremy
-}
-
-func ExampleApp_Run_subcommand() {
- // set args for examples sake
- os.Args = []string{"say", "hi", "english", "--name", "Jeremy"}
- app := NewApp()
- app.Name = "say"
- app.Commands = []Command{
- {
- Name: "hello",
- Aliases: []string{"hi"},
- Usage: "use it to see a description",
- Description: "This is how we describe hello the function",
- Subcommands: []Command{
- {
- Name: "english",
- Aliases: []string{"en"},
- Usage: "sends a greeting in english",
- Description: "greets someone in english",
- Flags: []Flag{
- StringFlag{
- Name: "name",
- Value: "Bob",
- Usage: "Name of the person to greet",
- },
- },
- Action: func(c *Context) error {
- fmt.Println("Hello,", c.String("name"))
- return nil
- },
- },
- },
- },
- }
-
- app.Run(os.Args)
- // Output:
- // Hello, Jeremy
-}
-
-func ExampleApp_Run_help() {
- // set args for examples sake
- os.Args = []string{"greet", "h", "describeit"}
-
- app := NewApp()
- app.Name = "greet"
- app.Flags = []Flag{
- StringFlag{Name: "name", Value: "bob", Usage: "a name to say"},
- }
- app.Commands = []Command{
- {
- Name: "describeit",
- Aliases: []string{"d"},
- Usage: "use it to see a description",
- Description: "This is how we describe describeit the function",
- Action: func(c *Context) error {
- fmt.Printf("i like to describe things")
- return nil
- },
- },
- }
- app.Run(os.Args)
- // Output:
- // NAME:
- // greet describeit - use it to see a description
- //
- // USAGE:
- // greet describeit [arguments...]
- //
- // DESCRIPTION:
- // This is how we describe describeit the function
-}
-
-func ExampleApp_Run_bashComplete() {
- // set args for examples sake
- os.Args = []string{"greet", "--generate-bash-completion"}
-
- app := NewApp()
- app.Name = "greet"
- app.EnableBashCompletion = true
- app.Commands = []Command{
- {
- Name: "describeit",
- Aliases: []string{"d"},
- Usage: "use it to see a description",
- Description: "This is how we describe describeit the function",
- Action: func(c *Context) error {
- fmt.Printf("i like to describe things")
- return nil
- },
- }, {
- Name: "next",
- Usage: "next example",
- Description: "more stuff to see when generating bash completion",
- Action: func(c *Context) error {
- fmt.Printf("the next example")
- return nil
- },
- },
- }
-
- app.Run(os.Args)
- // Output:
- // describeit
- // d
- // next
- // help
- // h
-}
-
-func TestApp_Run(t *testing.T) {
- s := ""
-
- app := NewApp()
- app.Action = func(c *Context) error {
- s = s + c.Args().First()
- return nil
- }
-
- err := app.Run([]string{"command", "foo"})
- expect(t, err, nil)
- err = app.Run([]string{"command", "bar"})
- expect(t, err, nil)
- expect(t, s, "foobar")
-}
-
-var commandAppTests = []struct {
- name string
- expected bool
-}{
- {"foobar", true},
- {"batbaz", true},
- {"b", true},
- {"f", true},
- {"bat", false},
- {"nothing", false},
-}
-
-func TestApp_Command(t *testing.T) {
- app := NewApp()
- fooCommand := Command{Name: "foobar", Aliases: []string{"f"}}
- batCommand := Command{Name: "batbaz", Aliases: []string{"b"}}
- app.Commands = []Command{
- fooCommand,
- batCommand,
- }
-
- for _, test := range commandAppTests {
- expect(t, app.Command(test.name) != nil, test.expected)
- }
-}
-
-func TestApp_CommandWithArgBeforeFlags(t *testing.T) {
- var parsedOption, firstArg string
-
- app := NewApp()
- command := Command{
- Name: "cmd",
- Flags: []Flag{
- StringFlag{Name: "option", Value: "", Usage: "some option"},
- },
- Action: func(c *Context) error {
- parsedOption = c.String("option")
- firstArg = c.Args().First()
- return nil
- },
- }
- app.Commands = []Command{command}
-
- app.Run([]string{"", "cmd", "my-arg", "--option", "my-option"})
-
- expect(t, parsedOption, "my-option")
- expect(t, firstArg, "my-arg")
-}
-
-func TestApp_RunAsSubcommandParseFlags(t *testing.T) {
- var context *Context
-
- a := NewApp()
- a.Commands = []Command{
- {
- Name: "foo",
- Action: func(c *Context) error {
- context = c
- return nil
- },
- Flags: []Flag{
- StringFlag{
- Name: "lang",
- Value: "english",
- Usage: "language for the greeting",
- },
- },
- Before: func(_ *Context) error { return nil },
- },
- }
- a.Run([]string{"", "foo", "--lang", "spanish", "abcd"})
-
- expect(t, context.Args().Get(0), "abcd")
- expect(t, context.String("lang"), "spanish")
-}
-
-func TestApp_CommandWithFlagBeforeTerminator(t *testing.T) {
- var parsedOption string
- var args []string
-
- app := NewApp()
- command := Command{
- Name: "cmd",
- Flags: []Flag{
- StringFlag{Name: "option", Value: "", Usage: "some option"},
- },
- Action: func(c *Context) error {
- parsedOption = c.String("option")
- args = c.Args()
- return nil
- },
- }
- app.Commands = []Command{command}
-
- app.Run([]string{"", "cmd", "my-arg", "--option", "my-option", "--", "--notARealFlag"})
-
- expect(t, parsedOption, "my-option")
- expect(t, args[0], "my-arg")
- expect(t, args[1], "--")
- expect(t, args[2], "--notARealFlag")
-}
-
-func TestApp_CommandWithDash(t *testing.T) {
- var args []string
-
- app := NewApp()
- command := Command{
- Name: "cmd",
- Action: func(c *Context) error {
- args = c.Args()
- return nil
- },
- }
- app.Commands = []Command{command}
-
- app.Run([]string{"", "cmd", "my-arg", "-"})
-
- expect(t, args[0], "my-arg")
- expect(t, args[1], "-")
-}
-
-func TestApp_CommandWithNoFlagBeforeTerminator(t *testing.T) {
- var args []string
-
- app := NewApp()
- command := Command{
- Name: "cmd",
- Action: func(c *Context) error {
- args = c.Args()
- return nil
- },
- }
- app.Commands = []Command{command}
-
- app.Run([]string{"", "cmd", "my-arg", "--", "notAFlagAtAll"})
-
- expect(t, args[0], "my-arg")
- expect(t, args[1], "--")
- expect(t, args[2], "notAFlagAtAll")
-}
-
-func TestApp_VisibleCommands(t *testing.T) {
- app := NewApp()
- app.Commands = []Command{
- {
- Name: "frob",
- HelpName: "foo frob",
- Action: func(_ *Context) error { return nil },
- },
- {
- Name: "frib",
- HelpName: "foo frib",
- Hidden: true,
- Action: func(_ *Context) error { return nil },
- },
- }
-
- app.Setup()
- expected := []Command{
- app.Commands[0],
- app.Commands[2], // help
- }
- actual := app.VisibleCommands()
- expect(t, len(expected), len(actual))
- for i, actualCommand := range actual {
- expectedCommand := expected[i]
-
- if expectedCommand.Action != nil {
- // comparing func addresses is OK!
- expect(t, fmt.Sprintf("%p", expectedCommand.Action), fmt.Sprintf("%p", actualCommand.Action))
- }
-
- // nil out funcs, as they cannot be compared
- // (https://github.com/golang/go/issues/8554)
- expectedCommand.Action = nil
- actualCommand.Action = nil
-
- if !reflect.DeepEqual(expectedCommand, actualCommand) {
- t.Errorf("expected\n%#v\n!=\n%#v", expectedCommand, actualCommand)
- }
- }
-}
-
-func TestApp_Float64Flag(t *testing.T) {
- var meters float64
-
- app := NewApp()
- app.Flags = []Flag{
- Float64Flag{Name: "height", Value: 1.5, Usage: "Set the height, in meters"},
- }
- app.Action = func(c *Context) error {
- meters = c.Float64("height")
- return nil
- }
-
- app.Run([]string{"", "--height", "1.93"})
- expect(t, meters, 1.93)
-}
-
-func TestApp_ParseSliceFlags(t *testing.T) {
- var parsedOption, firstArg string
- var parsedIntSlice []int
- var parsedStringSlice []string
-
- app := NewApp()
- command := Command{
- Name: "cmd",
- Flags: []Flag{
- IntSliceFlag{Name: "p", Value: &IntSlice{}, Usage: "set one or more ip addr"},
- StringSliceFlag{Name: "ip", Value: &StringSlice{}, Usage: "set one or more ports to open"},
- },
- Action: func(c *Context) error {
- parsedIntSlice = c.IntSlice("p")
- parsedStringSlice = c.StringSlice("ip")
- parsedOption = c.String("option")
- firstArg = c.Args().First()
- return nil
- },
- }
- app.Commands = []Command{command}
-
- app.Run([]string{"", "cmd", "my-arg", "-p", "22", "-p", "80", "-ip", "8.8.8.8", "-ip", "8.8.4.4"})
-
- IntsEquals := func(a, b []int) bool {
- if len(a) != len(b) {
- return false
- }
- for i, v := range a {
- if v != b[i] {
- return false
- }
- }
- return true
- }
-
- StrsEquals := func(a, b []string) bool {
- if len(a) != len(b) {
- return false
- }
- for i, v := range a {
- if v != b[i] {
- return false
- }
- }
- return true
- }
- var expectedIntSlice = []int{22, 80}
- var expectedStringSlice = []string{"8.8.8.8", "8.8.4.4"}
-
- if !IntsEquals(parsedIntSlice, expectedIntSlice) {
- t.Errorf("%v does not match %v", parsedIntSlice, expectedIntSlice)
- }
-
- if !StrsEquals(parsedStringSlice, expectedStringSlice) {
- t.Errorf("%v does not match %v", parsedStringSlice, expectedStringSlice)
- }
-}
-
-func TestApp_ParseSliceFlagsWithMissingValue(t *testing.T) {
- var parsedIntSlice []int
- var parsedStringSlice []string
-
- app := NewApp()
- command := Command{
- Name: "cmd",
- Flags: []Flag{
- IntSliceFlag{Name: "a", Usage: "set numbers"},
- StringSliceFlag{Name: "str", Usage: "set strings"},
- },
- Action: func(c *Context) error {
- parsedIntSlice = c.IntSlice("a")
- parsedStringSlice = c.StringSlice("str")
- return nil
- },
- }
- app.Commands = []Command{command}
-
- app.Run([]string{"", "cmd", "my-arg", "-a", "2", "-str", "A"})
-
- var expectedIntSlice = []int{2}
- var expectedStringSlice = []string{"A"}
-
- if parsedIntSlice[0] != expectedIntSlice[0] {
- t.Errorf("%v does not match %v", parsedIntSlice[0], expectedIntSlice[0])
- }
-
- if parsedStringSlice[0] != expectedStringSlice[0] {
- t.Errorf("%v does not match %v", parsedIntSlice[0], expectedIntSlice[0])
- }
-}
-
-func TestApp_DefaultStdout(t *testing.T) {
- app := NewApp()
-
- if app.Writer != os.Stdout {
- t.Error("Default output writer not set.")
- }
-}
-
-type mockWriter struct {
- written []byte
-}
-
-func (fw *mockWriter) Write(p []byte) (n int, err error) {
- if fw.written == nil {
- fw.written = p
- } else {
- fw.written = append(fw.written, p...)
- }
-
- return len(p), nil
-}
-
-func (fw *mockWriter) GetWritten() (b []byte) {
- return fw.written
-}
-
-func TestApp_SetStdout(t *testing.T) {
- w := &mockWriter{}
-
- app := NewApp()
- app.Name = "test"
- app.Writer = w
-
- err := app.Run([]string{"help"})
-
- if err != nil {
- t.Fatalf("Run error: %s", err)
- }
-
- if len(w.written) == 0 {
- t.Error("App did not write output to desired writer.")
- }
-}
-
-func TestApp_BeforeFunc(t *testing.T) {
- counts := &opCounts{}
- beforeError := fmt.Errorf("fail")
- var err error
-
- app := NewApp()
-
- app.Before = func(c *Context) error {
- counts.Total++
- counts.Before = counts.Total
- s := c.String("opt")
- if s == "fail" {
- return beforeError
- }
-
- return nil
- }
-
- app.Commands = []Command{
- {
- Name: "sub",
- Action: func(c *Context) error {
- counts.Total++
- counts.SubCommand = counts.Total
- return nil
- },
- },
- }
-
- app.Flags = []Flag{
- StringFlag{Name: "opt"},
- }
-
- // run with the Before() func succeeding
- err = app.Run([]string{"command", "--opt", "succeed", "sub"})
-
- if err != nil {
- t.Fatalf("Run error: %s", err)
- }
-
- if counts.Before != 1 {
- t.Errorf("Before() not executed when expected")
- }
-
- if counts.SubCommand != 2 {
- t.Errorf("Subcommand not executed when expected")
- }
-
- // reset
- counts = &opCounts{}
-
- // run with the Before() func failing
- err = app.Run([]string{"command", "--opt", "fail", "sub"})
-
- // should be the same error produced by the Before func
- if err != beforeError {
- t.Errorf("Run error expected, but not received")
- }
-
- if counts.Before != 1 {
- t.Errorf("Before() not executed when expected")
- }
-
- if counts.SubCommand != 0 {
- t.Errorf("Subcommand executed when NOT expected")
- }
-
- // reset
- counts = &opCounts{}
-
- afterError := errors.New("fail again")
- app.After = func(_ *Context) error {
- return afterError
- }
-
- // run with the Before() func failing, wrapped by After()
- err = app.Run([]string{"command", "--opt", "fail", "sub"})
-
- // should be the same error produced by the Before func
- if _, ok := err.(MultiError); !ok {
- t.Errorf("MultiError expected, but not received")
- }
-
- if counts.Before != 1 {
- t.Errorf("Before() not executed when expected")
- }
-
- if counts.SubCommand != 0 {
- t.Errorf("Subcommand executed when NOT expected")
- }
-}
-
-func TestApp_AfterFunc(t *testing.T) {
- counts := &opCounts{}
- afterError := fmt.Errorf("fail")
- var err error
-
- app := NewApp()
-
- app.After = func(c *Context) error {
- counts.Total++
- counts.After = counts.Total
- s := c.String("opt")
- if s == "fail" {
- return afterError
- }
-
- return nil
- }
-
- app.Commands = []Command{
- {
- Name: "sub",
- Action: func(c *Context) error {
- counts.Total++
- counts.SubCommand = counts.Total
- return nil
- },
- },
- }
-
- app.Flags = []Flag{
- StringFlag{Name: "opt"},
- }
-
- // run with the After() func succeeding
- err = app.Run([]string{"command", "--opt", "succeed", "sub"})
-
- if err != nil {
- t.Fatalf("Run error: %s", err)
- }
-
- if counts.After != 2 {
- t.Errorf("After() not executed when expected")
- }
-
- if counts.SubCommand != 1 {
- t.Errorf("Subcommand not executed when expected")
- }
-
- // reset
- counts = &opCounts{}
-
- // run with the Before() func failing
- err = app.Run([]string{"command", "--opt", "fail", "sub"})
-
- // should be the same error produced by the Before func
- if err != afterError {
- t.Errorf("Run error expected, but not received")
- }
-
- if counts.After != 2 {
- t.Errorf("After() not executed when expected")
- }
-
- if counts.SubCommand != 1 {
- t.Errorf("Subcommand not executed when expected")
- }
-}
-
-func TestAppNoHelpFlag(t *testing.T) {
- oldFlag := HelpFlag
- defer func() {
- HelpFlag = oldFlag
- }()
-
- HelpFlag = BoolFlag{}
-
- app := NewApp()
- app.Writer = ioutil.Discard
- err := app.Run([]string{"test", "-h"})
-
- if err != flag.ErrHelp {
- t.Errorf("expected error about missing help flag, but got: %s (%T)", err, err)
- }
-}
-
-func TestAppHelpPrinter(t *testing.T) {
- oldPrinter := HelpPrinter
- defer func() {
- HelpPrinter = oldPrinter
- }()
-
- var wasCalled = false
- HelpPrinter = func(w io.Writer, template string, data interface{}) {
- wasCalled = true
- }
-
- app := NewApp()
- app.Run([]string{"-h"})
-
- if wasCalled == false {
- t.Errorf("Help printer expected to be called, but was not")
- }
-}
-
-func TestApp_VersionPrinter(t *testing.T) {
- oldPrinter := VersionPrinter
- defer func() {
- VersionPrinter = oldPrinter
- }()
-
- var wasCalled = false
- VersionPrinter = func(c *Context) {
- wasCalled = true
- }
-
- app := NewApp()
- ctx := NewContext(app, nil, nil)
- ShowVersion(ctx)
-
- if wasCalled == false {
- t.Errorf("Version printer expected to be called, but was not")
- }
-}
-
-func TestApp_CommandNotFound(t *testing.T) {
- counts := &opCounts{}
- app := NewApp()
-
- app.CommandNotFound = func(c *Context, command string) {
- counts.Total++
- counts.CommandNotFound = counts.Total
- }
-
- app.Commands = []Command{
- {
- Name: "bar",
- Action: func(c *Context) error {
- counts.Total++
- counts.SubCommand = counts.Total
- return nil
- },
- },
- }
-
- app.Run([]string{"command", "foo"})
-
- expect(t, counts.CommandNotFound, 1)
- expect(t, counts.SubCommand, 0)
- expect(t, counts.Total, 1)
-}
-
-func TestApp_OrderOfOperations(t *testing.T) {
- counts := &opCounts{}
-
- resetCounts := func() { counts = &opCounts{} }
-
- app := NewApp()
- app.EnableBashCompletion = true
- app.BashComplete = func(c *Context) {
- counts.Total++
- counts.BashComplete = counts.Total
- }
-
- app.OnUsageError = func(c *Context, err error, isSubcommand bool) error {
- counts.Total++
- counts.OnUsageError = counts.Total
- return errors.New("hay OnUsageError")
- }
-
- beforeNoError := func(c *Context) error {
- counts.Total++
- counts.Before = counts.Total
- return nil
- }
-
- beforeError := func(c *Context) error {
- counts.Total++
- counts.Before = counts.Total
- return errors.New("hay Before")
- }
-
- app.Before = beforeNoError
- app.CommandNotFound = func(c *Context, command string) {
- counts.Total++
- counts.CommandNotFound = counts.Total
- }
-
- afterNoError := func(c *Context) error {
- counts.Total++
- counts.After = counts.Total
- return nil
- }
-
- afterError := func(c *Context) error {
- counts.Total++
- counts.After = counts.Total
- return errors.New("hay After")
- }
-
- app.After = afterNoError
- app.Commands = []Command{
- {
- Name: "bar",
- Action: func(c *Context) error {
- counts.Total++
- counts.SubCommand = counts.Total
- return nil
- },
- },
- }
-
- app.Action = func(c *Context) error {
- counts.Total++
- counts.Action = counts.Total
- return nil
- }
-
- _ = app.Run([]string{"command", "--nope"})
- expect(t, counts.OnUsageError, 1)
- expect(t, counts.Total, 1)
-
- resetCounts()
-
- _ = app.Run([]string{"command", "--generate-bash-completion"})
- expect(t, counts.BashComplete, 1)
- expect(t, counts.Total, 1)
-
- resetCounts()
-
- oldOnUsageError := app.OnUsageError
- app.OnUsageError = nil
- _ = app.Run([]string{"command", "--nope"})
- expect(t, counts.Total, 0)
- app.OnUsageError = oldOnUsageError
-
- resetCounts()
-
- _ = app.Run([]string{"command", "foo"})
- expect(t, counts.OnUsageError, 0)
- expect(t, counts.Before, 1)
- expect(t, counts.CommandNotFound, 0)
- expect(t, counts.Action, 2)
- expect(t, counts.After, 3)
- expect(t, counts.Total, 3)
-
- resetCounts()
-
- app.Before = beforeError
- _ = app.Run([]string{"command", "bar"})
- expect(t, counts.OnUsageError, 0)
- expect(t, counts.Before, 1)
- expect(t, counts.After, 2)
- expect(t, counts.Total, 2)
- app.Before = beforeNoError
-
- resetCounts()
-
- app.After = nil
- _ = app.Run([]string{"command", "bar"})
- expect(t, counts.OnUsageError, 0)
- expect(t, counts.Before, 1)
- expect(t, counts.SubCommand, 2)
- expect(t, counts.Total, 2)
- app.After = afterNoError
-
- resetCounts()
-
- app.After = afterError
- err := app.Run([]string{"command", "bar"})
- if err == nil {
- t.Fatalf("expected a non-nil error")
- }
- expect(t, counts.OnUsageError, 0)
- expect(t, counts.Before, 1)
- expect(t, counts.SubCommand, 2)
- expect(t, counts.After, 3)
- expect(t, counts.Total, 3)
- app.After = afterNoError
-
- resetCounts()
-
- oldCommands := app.Commands
- app.Commands = nil
- _ = app.Run([]string{"command"})
- expect(t, counts.OnUsageError, 0)
- expect(t, counts.Before, 1)
- expect(t, counts.Action, 2)
- expect(t, counts.After, 3)
- expect(t, counts.Total, 3)
- app.Commands = oldCommands
-}
-
-func TestApp_Run_CommandWithSubcommandHasHelpTopic(t *testing.T) {
- var subcommandHelpTopics = [][]string{
- {"command", "foo", "--help"},
- {"command", "foo", "-h"},
- {"command", "foo", "help"},
- }
-
- for _, flagSet := range subcommandHelpTopics {
- t.Logf("==> checking with flags %v", flagSet)
-
- app := NewApp()
- buf := new(bytes.Buffer)
- app.Writer = buf
-
- subCmdBar := Command{
- Name: "bar",
- Usage: "does bar things",
- }
- subCmdBaz := Command{
- Name: "baz",
- Usage: "does baz things",
- }
- cmd := Command{
- Name: "foo",
- Description: "descriptive wall of text about how it does foo things",
- Subcommands: []Command{subCmdBar, subCmdBaz},
- Action: func(c *Context) error { return nil },
- }
-
- app.Commands = []Command{cmd}
- err := app.Run(flagSet)
-
- if err != nil {
- t.Error(err)
- }
-
- output := buf.String()
- t.Logf("output: %q\n", buf.Bytes())
-
- if strings.Contains(output, "No help topic for") {
- t.Errorf("expect a help topic, got none: \n%q", output)
- }
-
- for _, shouldContain := range []string{
- cmd.Name, cmd.Description,
- subCmdBar.Name, subCmdBar.Usage,
- subCmdBaz.Name, subCmdBaz.Usage,
- } {
- if !strings.Contains(output, shouldContain) {
- t.Errorf("want help to contain %q, did not: \n%q", shouldContain, output)
- }
- }
- }
-}
-
-func TestApp_Run_SubcommandFullPath(t *testing.T) {
- app := NewApp()
- buf := new(bytes.Buffer)
- app.Writer = buf
- app.Name = "command"
- subCmd := Command{
- Name: "bar",
- Usage: "does bar things",
- }
- cmd := Command{
- Name: "foo",
- Description: "foo commands",
- Subcommands: []Command{subCmd},
- }
- app.Commands = []Command{cmd}
-
- err := app.Run([]string{"command", "foo", "bar", "--help"})
- if err != nil {
- t.Error(err)
- }
-
- output := buf.String()
- if !strings.Contains(output, "command foo bar - does bar things") {
- t.Errorf("expected full path to subcommand: %s", output)
- }
- if !strings.Contains(output, "command foo bar [arguments...]") {
- t.Errorf("expected full path to subcommand: %s", output)
- }
-}
-
-func TestApp_Run_SubcommandHelpName(t *testing.T) {
- app := NewApp()
- buf := new(bytes.Buffer)
- app.Writer = buf
- app.Name = "command"
- subCmd := Command{
- Name: "bar",
- HelpName: "custom",
- Usage: "does bar things",
- }
- cmd := Command{
- Name: "foo",
- Description: "foo commands",
- Subcommands: []Command{subCmd},
- }
- app.Commands = []Command{cmd}
-
- err := app.Run([]string{"command", "foo", "bar", "--help"})
- if err != nil {
- t.Error(err)
- }
-
- output := buf.String()
- if !strings.Contains(output, "custom - does bar things") {
- t.Errorf("expected HelpName for subcommand: %s", output)
- }
- if !strings.Contains(output, "custom [arguments...]") {
- t.Errorf("expected HelpName to subcommand: %s", output)
- }
-}
-
-func TestApp_Run_CommandHelpName(t *testing.T) {
- app := NewApp()
- buf := new(bytes.Buffer)
- app.Writer = buf
- app.Name = "command"
- subCmd := Command{
- Name: "bar",
- Usage: "does bar things",
- }
- cmd := Command{
- Name: "foo",
- HelpName: "custom",
- Description: "foo commands",
- Subcommands: []Command{subCmd},
- }
- app.Commands = []Command{cmd}
-
- err := app.Run([]string{"command", "foo", "bar", "--help"})
- if err != nil {
- t.Error(err)
- }
-
- output := buf.String()
- if !strings.Contains(output, "command foo bar - does bar things") {
- t.Errorf("expected full path to subcommand: %s", output)
- }
- if !strings.Contains(output, "command foo bar [arguments...]") {
- t.Errorf("expected full path to subcommand: %s", output)
- }
-}
-
-func TestApp_Run_CommandSubcommandHelpName(t *testing.T) {
- app := NewApp()
- buf := new(bytes.Buffer)
- app.Writer = buf
- app.Name = "base"
- subCmd := Command{
- Name: "bar",
- HelpName: "custom",
- Usage: "does bar things",
- }
- cmd := Command{
- Name: "foo",
- Description: "foo commands",
- Subcommands: []Command{subCmd},
- }
- app.Commands = []Command{cmd}
-
- err := app.Run([]string{"command", "foo", "--help"})
- if err != nil {
- t.Error(err)
- }
-
- output := buf.String()
- if !strings.Contains(output, "base foo - foo commands") {
- t.Errorf("expected full path to subcommand: %s", output)
- }
- if !strings.Contains(output, "base foo command [command options] [arguments...]") {
- t.Errorf("expected full path to subcommand: %s", output)
- }
-}
-
-func TestApp_Run_Help(t *testing.T) {
- var helpArguments = [][]string{{"boom", "--help"}, {"boom", "-h"}, {"boom", "help"}}
-
- for _, args := range helpArguments {
- buf := new(bytes.Buffer)
-
- t.Logf("==> checking with arguments %v", args)
-
- app := NewApp()
- app.Name = "boom"
- app.Usage = "make an explosive entrance"
- app.Writer = buf
- app.Action = func(c *Context) error {
- buf.WriteString("boom I say!")
- return nil
- }
-
- err := app.Run(args)
- if err != nil {
- t.Error(err)
- }
-
- output := buf.String()
- t.Logf("output: %q\n", buf.Bytes())
-
- if !strings.Contains(output, "boom - make an explosive entrance") {
- t.Errorf("want help to contain %q, did not: \n%q", "boom - make an explosive entrance", output)
- }
- }
-}
-
-func TestApp_Run_Version(t *testing.T) {
- var versionArguments = [][]string{{"boom", "--version"}, {"boom", "-v"}}
-
- for _, args := range versionArguments {
- buf := new(bytes.Buffer)
-
- t.Logf("==> checking with arguments %v", args)
-
- app := NewApp()
- app.Name = "boom"
- app.Usage = "make an explosive entrance"
- app.Version = "0.1.0"
- app.Writer = buf
- app.Action = func(c *Context) error {
- buf.WriteString("boom I say!")
- return nil
- }
-
- err := app.Run(args)
- if err != nil {
- t.Error(err)
- }
-
- output := buf.String()
- t.Logf("output: %q\n", buf.Bytes())
-
- if !strings.Contains(output, "0.1.0") {
- t.Errorf("want version to contain %q, did not: \n%q", "0.1.0", output)
- }
- }
-}
-
-func TestApp_Run_Categories(t *testing.T) {
- app := NewApp()
- app.Name = "categories"
- app.HideHelp = true
- app.Commands = []Command{
- {
- Name: "command1",
- Category: "1",
- },
- {
- Name: "command2",
- Category: "1",
- },
- {
- Name: "command3",
- Category: "2",
- },
- }
- buf := new(bytes.Buffer)
- app.Writer = buf
-
- app.Run([]string{"categories"})
-
- expect := CommandCategories{
- &CommandCategory{
- Name: "1",
- Commands: []Command{
- app.Commands[0],
- app.Commands[1],
- },
- },
- &CommandCategory{
- Name: "2",
- Commands: []Command{
- app.Commands[2],
- },
- },
- }
- if !reflect.DeepEqual(app.Categories(), expect) {
- t.Fatalf("expected categories %#v, to equal %#v", app.Categories(), expect)
- }
-
- output := buf.String()
- t.Logf("output: %q\n", buf.Bytes())
-
- if !strings.Contains(output, "1:\n command1") {
- t.Errorf("want buffer to include category %q, did not: \n%q", "1:\n command1", output)
- }
-}
-
-func TestApp_VisibleCategories(t *testing.T) {
- app := NewApp()
- app.Name = "visible-categories"
- app.HideHelp = true
- app.Commands = []Command{
- {
- Name: "command1",
- Category: "1",
- HelpName: "foo command1",
- Hidden: true,
- },
- {
- Name: "command2",
- Category: "2",
- HelpName: "foo command2",
- },
- {
- Name: "command3",
- Category: "3",
- HelpName: "foo command3",
- },
- }
-
- expected := []*CommandCategory{
- {
- Name: "2",
- Commands: []Command{
- app.Commands[1],
- },
- },
- {
- Name: "3",
- Commands: []Command{
- app.Commands[2],
- },
- },
- }
-
- app.Setup()
- expect(t, expected, app.VisibleCategories())
-
- app = NewApp()
- app.Name = "visible-categories"
- app.HideHelp = true
- app.Commands = []Command{
- {
- Name: "command1",
- Category: "1",
- HelpName: "foo command1",
- Hidden: true,
- },
- {
- Name: "command2",
- Category: "2",
- HelpName: "foo command2",
- Hidden: true,
- },
- {
- Name: "command3",
- Category: "3",
- HelpName: "foo command3",
- },
- }
-
- expected = []*CommandCategory{
- {
- Name: "3",
- Commands: []Command{
- app.Commands[2],
- },
- },
- }
-
- app.Setup()
- expect(t, expected, app.VisibleCategories())
-
- app = NewApp()
- app.Name = "visible-categories"
- app.HideHelp = true
- app.Commands = []Command{
- {
- Name: "command1",
- Category: "1",
- HelpName: "foo command1",
- Hidden: true,
- },
- {
- Name: "command2",
- Category: "2",
- HelpName: "foo command2",
- Hidden: true,
- },
- {
- Name: "command3",
- Category: "3",
- HelpName: "foo command3",
- Hidden: true,
- },
- }
-
- expected = []*CommandCategory{}
-
- app.Setup()
- expect(t, expected, app.VisibleCategories())
-}
-
-func TestApp_Run_DoesNotOverwriteErrorFromBefore(t *testing.T) {
- app := NewApp()
- app.Action = func(c *Context) error { return nil }
- app.Before = func(c *Context) error { return fmt.Errorf("before error") }
- app.After = func(c *Context) error { return fmt.Errorf("after error") }
-
- err := app.Run([]string{"foo"})
- if err == nil {
- t.Fatalf("expected to receive error from Run, got none")
- }
-
- if !strings.Contains(err.Error(), "before error") {
- t.Errorf("expected text of error from Before method, but got none in \"%v\"", err)
- }
- if !strings.Contains(err.Error(), "after error") {
- t.Errorf("expected text of error from After method, but got none in \"%v\"", err)
- }
-}
-
-func TestApp_Run_SubcommandDoesNotOverwriteErrorFromBefore(t *testing.T) {
- app := NewApp()
- app.Commands = []Command{
- {
- Subcommands: []Command{
- {
- Name: "sub",
- },
- },
- Name: "bar",
- Before: func(c *Context) error { return fmt.Errorf("before error") },
- After: func(c *Context) error { return fmt.Errorf("after error") },
- },
- }
-
- err := app.Run([]string{"foo", "bar"})
- if err == nil {
- t.Fatalf("expected to receive error from Run, got none")
- }
-
- if !strings.Contains(err.Error(), "before error") {
- t.Errorf("expected text of error from Before method, but got none in \"%v\"", err)
- }
- if !strings.Contains(err.Error(), "after error") {
- t.Errorf("expected text of error from After method, but got none in \"%v\"", err)
- }
-}
-
-func TestApp_OnUsageError_WithWrongFlagValue(t *testing.T) {
- app := NewApp()
- app.Flags = []Flag{
- IntFlag{Name: "flag"},
- }
- app.OnUsageError = func(c *Context, err error, isSubcommand bool) error {
- if isSubcommand {
- t.Errorf("Expect no subcommand")
- }
- if !strings.HasPrefix(err.Error(), "invalid value \"wrong\"") {
- t.Errorf("Expect an invalid value error, but got \"%v\"", err)
- }
- return errors.New("intercepted: " + err.Error())
- }
- app.Commands = []Command{
- {
- Name: "bar",
- },
- }
-
- err := app.Run([]string{"foo", "--flag=wrong"})
- if err == nil {
- t.Fatalf("expected to receive error from Run, got none")
- }
-
- if !strings.HasPrefix(err.Error(), "intercepted: invalid value") {
- t.Errorf("Expect an intercepted error, but got \"%v\"", err)
- }
-}
-
-func TestApp_OnUsageError_WithWrongFlagValue_ForSubcommand(t *testing.T) {
- app := NewApp()
- app.Flags = []Flag{
- IntFlag{Name: "flag"},
- }
- app.OnUsageError = func(c *Context, err error, isSubcommand bool) error {
- if isSubcommand {
- t.Errorf("Expect subcommand")
- }
- if !strings.HasPrefix(err.Error(), "invalid value \"wrong\"") {
- t.Errorf("Expect an invalid value error, but got \"%v\"", err)
- }
- return errors.New("intercepted: " + err.Error())
- }
- app.Commands = []Command{
- {
- Name: "bar",
- },
- }
-
- err := app.Run([]string{"foo", "--flag=wrong", "bar"})
- if err == nil {
- t.Fatalf("expected to receive error from Run, got none")
- }
-
- if !strings.HasPrefix(err.Error(), "intercepted: invalid value") {
- t.Errorf("Expect an intercepted error, but got \"%v\"", err)
- }
-}
-
-func TestHandleAction_WithNonFuncAction(t *testing.T) {
- app := NewApp()
- app.Action = 42
- err := HandleAction(app.Action, NewContext(app, flagSet(app.Name, app.Flags), nil))
-
- if err == nil {
- t.Fatalf("expected to receive error from Run, got none")
- }
-
- exitErr, ok := err.(*ExitError)
-
- if !ok {
- t.Fatalf("expected to receive a *ExitError")
- }
-
- if !strings.HasPrefix(exitErr.Error(), "ERROR invalid Action type") {
- t.Fatalf("expected an unknown Action error, but got: %v", exitErr.Error())
- }
-
- if exitErr.ExitCode() != 2 {
- t.Fatalf("expected error exit code to be 2, but got: %v", exitErr.ExitCode())
- }
-}
-
-func TestHandleAction_WithInvalidFuncSignature(t *testing.T) {
- app := NewApp()
- app.Action = func() string { return "" }
- err := HandleAction(app.Action, NewContext(app, flagSet(app.Name, app.Flags), nil))
-
- if err == nil {
- t.Fatalf("expected to receive error from Run, got none")
- }
-
- exitErr, ok := err.(*ExitError)
-
- if !ok {
- t.Fatalf("expected to receive a *ExitError")
- }
-
- if !strings.HasPrefix(exitErr.Error(), "ERROR unknown Action error") {
- t.Fatalf("expected an unknown Action error, but got: %v", exitErr.Error())
- }
-
- if exitErr.ExitCode() != 2 {
- t.Fatalf("expected error exit code to be 2, but got: %v", exitErr.ExitCode())
- }
-}
-
-func TestHandleAction_WithInvalidFuncReturnSignature(t *testing.T) {
- app := NewApp()
- app.Action = func(_ *Context) (int, error) { return 0, nil }
- err := HandleAction(app.Action, NewContext(app, flagSet(app.Name, app.Flags), nil))
-
- if err == nil {
- t.Fatalf("expected to receive error from Run, got none")
- }
-
- exitErr, ok := err.(*ExitError)
-
- if !ok {
- t.Fatalf("expected to receive a *ExitError")
- }
-
- if !strings.HasPrefix(exitErr.Error(), "ERROR invalid Action signature") {
- t.Fatalf("expected an invalid Action signature error, but got: %v", exitErr.Error())
- }
-
- if exitErr.ExitCode() != 2 {
- t.Fatalf("expected error exit code to be 2, but got: %v", exitErr.ExitCode())
- }
-}
-
-func TestHandleAction_WithUnknownPanic(t *testing.T) {
- defer func() { refute(t, recover(), nil) }()
-
- var fn ActionFunc
-
- app := NewApp()
- app.Action = func(ctx *Context) error {
- fn(ctx)
- return nil
- }
- HandleAction(app.Action, NewContext(app, flagSet(app.Name, app.Flags), nil))
-}
diff --git a/vendor/src/github.com/codegangsta/cli/appveyor.yml b/vendor/src/github.com/codegangsta/cli/appveyor.yml
deleted file mode 100644
index 698b188..0000000
--- a/vendor/src/github.com/codegangsta/cli/appveyor.yml
+++ /dev/null
@@ -1,24 +0,0 @@
-version: "{build}"
-
-os: Windows Server 2012 R2
-
-clone_folder: c:\gopath\src\github.com\urfave\cli
-
-environment:
- GOPATH: C:\gopath
- GOVERSION: 1.6
- PYTHON: C:\Python27-x64
- PYTHON_VERSION: 2.7.x
- PYTHON_ARCH: 64
-
-install:
-- set PATH=%GOPATH%\bin;C:\go\bin;%PATH%
-- go version
-- go env
-- go get github.com/urfave/gfmrun/...
-- go get -v -t ./...
-
-build_script:
-- python runtests vet
-- python runtests test
-- python runtests gfmrun
diff --git a/vendor/src/github.com/codegangsta/cli/autocomplete/bash_autocomplete b/vendor/src/github.com/codegangsta/cli/autocomplete/bash_autocomplete
deleted file mode 100644
index 21a232f..0000000
--- a/vendor/src/github.com/codegangsta/cli/autocomplete/bash_autocomplete
+++ /dev/null
@@ -1,14 +0,0 @@
-#! /bin/bash
-
-: ${PROG:=$(basename ${BASH_SOURCE})}
-
-_cli_bash_autocomplete() {
- local cur opts base
- COMPREPLY=()
- cur="${COMP_WORDS[COMP_CWORD]}"
- opts=$( ${COMP_WORDS[@]:0:$COMP_CWORD} --generate-bash-completion )
- COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
- return 0
- }
-
- complete -F _cli_bash_autocomplete $PROG
diff --git a/vendor/src/github.com/codegangsta/cli/autocomplete/zsh_autocomplete b/vendor/src/github.com/codegangsta/cli/autocomplete/zsh_autocomplete
deleted file mode 100644
index 5430a18..0000000
--- a/vendor/src/github.com/codegangsta/cli/autocomplete/zsh_autocomplete
+++ /dev/null
@@ -1,5 +0,0 @@
-autoload -U compinit && compinit
-autoload -U bashcompinit && bashcompinit
-
-script_dir=$(dirname $0)
-source ${script_dir}/bash_autocomplete
diff --git a/vendor/src/github.com/codegangsta/cli/category.go b/vendor/src/github.com/codegangsta/cli/category.go
deleted file mode 100644
index 1a60550..0000000
--- a/vendor/src/github.com/codegangsta/cli/category.go
+++ /dev/null
@@ -1,44 +0,0 @@
-package cli
-
-// CommandCategories is a slice of *CommandCategory.
-type CommandCategories []*CommandCategory
-
-// CommandCategory is a category containing commands.
-type CommandCategory struct {
- Name string
- Commands Commands
-}
-
-func (c CommandCategories) Less(i, j int) bool {
- return c[i].Name < c[j].Name
-}
-
-func (c CommandCategories) Len() int {
- return len(c)
-}
-
-func (c CommandCategories) Swap(i, j int) {
- c[i], c[j] = c[j], c[i]
-}
-
-// AddCommand adds a command to a category.
-func (c CommandCategories) AddCommand(category string, command Command) CommandCategories {
- for _, commandCategory := range c {
- if commandCategory.Name == category {
- commandCategory.Commands = append(commandCategory.Commands, command)
- return c
- }
- }
- return append(c, &CommandCategory{Name: category, Commands: []Command{command}})
-}
-
-// VisibleCommands returns a slice of the Commands with Hidden=false
-func (c *CommandCategory) VisibleCommands() []Command {
- ret := []Command{}
- for _, command := range c.Commands {
- if !command.Hidden {
- ret = append(ret, command)
- }
- }
- return ret
-}
diff --git a/vendor/src/github.com/codegangsta/cli/cli.go b/vendor/src/github.com/codegangsta/cli/cli.go
deleted file mode 100644
index 74fd101..0000000
--- a/vendor/src/github.com/codegangsta/cli/cli.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// Package cli provides a minimal framework for creating and organizing command line
-// Go applications. cli is designed to be easy to understand and write, the most simple
-// cli application can be written as follows:
-// func main() {
-// cli.NewApp().Run(os.Args)
-// }
-//
-// Of course this application does not do much, so let's make this an actual application:
-// func main() {
-// app := cli.NewApp()
-// app.Name = "greet"
-// app.Usage = "say a greeting"
-// app.Action = func(c *cli.Context) error {
-// println("Greetings")
-// }
-//
-// app.Run(os.Args)
-// }
-package cli
-
-//go:generate python ./generate-flag-types cli -i flag-types.json -o flag_generated.go
diff --git a/vendor/src/github.com/codegangsta/cli/command.go b/vendor/src/github.com/codegangsta/cli/command.go
deleted file mode 100644
index 96253b6..0000000
--- a/vendor/src/github.com/codegangsta/cli/command.go
+++ /dev/null
@@ -1,284 +0,0 @@
-package cli
-
-import (
- "fmt"
- "io/ioutil"
- "sort"
- "strings"
-)
-
-// Command is a subcommand for a cli.App.
-type Command struct {
- // The name of the command
- Name string
- // short name of the command. Typically one character (deprecated, use `Aliases`)
- ShortName string
- // A list of aliases for the command
- Aliases []string
- // A short description of the usage of this command
- Usage string
- // Custom text to show on USAGE section of help
- UsageText string
- // A longer explanation of how the command works
- Description string
- // A short description of the arguments of this command
- ArgsUsage string
- // The category the command is part of
- Category string
- // The function to call when checking for bash command completions
- BashComplete BashCompleteFunc
- // An action to execute before any sub-subcommands are run, but after the context is ready
- // If a non-nil error is returned, no sub-subcommands are run
- Before BeforeFunc
- // An action to execute after any subcommands are run, but after the subcommand has finished
- // It is run even if Action() panics
- After AfterFunc
- // The function to call when this command is invoked
- Action interface{}
- // TODO: replace `Action: interface{}` with `Action: ActionFunc` once some kind
- // of deprecation period has passed, maybe?
-
- // Execute this function if a usage error occurs.
- OnUsageError OnUsageErrorFunc
- // List of child commands
- Subcommands Commands
- // List of flags to parse
- Flags []Flag
- // Treat all flags as normal arguments if true
- SkipFlagParsing bool
- // Skip argument reordering which attempts to move flags before arguments,
- // but only works if all flags appear after all arguments. This behavior was
- // removed n version 2 since it only works under specific conditions so we
- // backport here by exposing it as an option for compatibility.
- SkipArgReorder bool
- // Boolean to hide built-in help command
- HideHelp bool
- // Boolean to hide this command from help or completion
- Hidden bool
-
- // Full name of command for help, defaults to full command name, including parent commands.
- HelpName string
- commandNamePath []string
-}
-
-// FullName returns the full name of the command.
-// For subcommands this ensures that parent commands are part of the command path
-func (c Command) FullName() string {
- if c.commandNamePath == nil {
- return c.Name
- }
- return strings.Join(c.commandNamePath, " ")
-}
-
-// Commands is a slice of Command
-type Commands []Command
-
-// Run invokes the command given the context, parses ctx.Args() to generate command-specific flags
-func (c Command) Run(ctx *Context) (err error) {
- if len(c.Subcommands) > 0 {
- return c.startApp(ctx)
- }
-
- if !c.HideHelp && (HelpFlag != BoolFlag{}) {
- // append help to flags
- c.Flags = append(
- c.Flags,
- HelpFlag,
- )
- }
-
- if ctx.App.EnableBashCompletion {
- c.Flags = append(c.Flags, BashCompletionFlag)
- }
-
- set := flagSet(c.Name, c.Flags)
- set.SetOutput(ioutil.Discard)
-
- if c.SkipFlagParsing {
- err = set.Parse(append([]string{"--"}, ctx.Args().Tail()...))
- } else if !c.SkipArgReorder {
- firstFlagIndex := -1
- terminatorIndex := -1
- for index, arg := range ctx.Args() {
- if arg == "--" {
- terminatorIndex = index
- break
- } else if arg == "-" {
- // Do nothing. A dash alone is not really a flag.
- continue
- } else if strings.HasPrefix(arg, "-") && firstFlagIndex == -1 {
- firstFlagIndex = index
- }
- }
-
- if firstFlagIndex > -1 {
- args := ctx.Args()
- regularArgs := make([]string, len(args[1:firstFlagIndex]))
- copy(regularArgs, args[1:firstFlagIndex])
-
- var flagArgs []string
- if terminatorIndex > -1 {
- flagArgs = args[firstFlagIndex:terminatorIndex]
- regularArgs = append(regularArgs, args[terminatorIndex:]...)
- } else {
- flagArgs = args[firstFlagIndex:]
- }
-
- err = set.Parse(append(flagArgs, regularArgs...))
- } else {
- err = set.Parse(ctx.Args().Tail())
- }
- } else {
- err = set.Parse(ctx.Args().Tail())
- }
-
- if err != nil {
- if c.OnUsageError != nil {
- err := c.OnUsageError(ctx, err, false)
- HandleExitCoder(err)
- return err
- }
- fmt.Fprintln(ctx.App.Writer, "Incorrect Usage.")
- fmt.Fprintln(ctx.App.Writer)
- ShowCommandHelp(ctx, c.Name)
- return err
- }
-
- nerr := normalizeFlags(c.Flags, set)
- if nerr != nil {
- fmt.Fprintln(ctx.App.Writer, nerr)
- fmt.Fprintln(ctx.App.Writer)
- ShowCommandHelp(ctx, c.Name)
- return nerr
- }
-
- context := NewContext(ctx.App, set, ctx)
-
- if checkCommandCompletions(context, c.Name) {
- return nil
- }
-
- if checkCommandHelp(context, c.Name) {
- return nil
- }
-
- if c.After != nil {
- defer func() {
- afterErr := c.After(context)
- if afterErr != nil {
- HandleExitCoder(err)
- if err != nil {
- err = NewMultiError(err, afterErr)
- } else {
- err = afterErr
- }
- }
- }()
- }
-
- if c.Before != nil {
- err = c.Before(context)
- if err != nil {
- fmt.Fprintln(ctx.App.Writer, err)
- fmt.Fprintln(ctx.App.Writer)
- ShowCommandHelp(ctx, c.Name)
- HandleExitCoder(err)
- return err
- }
- }
-
- context.Command = c
- err = HandleAction(c.Action, context)
-
- if err != nil {
- HandleExitCoder(err)
- }
- return err
-}
-
-// Names returns the names including short names and aliases.
-func (c Command) Names() []string {
- names := []string{c.Name}
-
- if c.ShortName != "" {
- names = append(names, c.ShortName)
- }
-
- return append(names, c.Aliases...)
-}
-
-// HasName returns true if Command.Name or Command.ShortName matches given name
-func (c Command) HasName(name string) bool {
- for _, n := range c.Names() {
- if n == name {
- return true
- }
- }
- return false
-}
-
-func (c Command) startApp(ctx *Context) error {
- app := NewApp()
- app.Metadata = ctx.App.Metadata
- // set the name and usage
- app.Name = fmt.Sprintf("%s %s", ctx.App.Name, c.Name)
- if c.HelpName == "" {
- app.HelpName = c.HelpName
- } else {
- app.HelpName = app.Name
- }
-
- if c.Description != "" {
- app.Usage = c.Description
- } else {
- app.Usage = c.Usage
- }
-
- // set CommandNotFound
- app.CommandNotFound = ctx.App.CommandNotFound
-
- // set the flags and commands
- app.Commands = c.Subcommands
- app.Flags = c.Flags
- app.HideHelp = c.HideHelp
-
- app.Version = ctx.App.Version
- app.HideVersion = ctx.App.HideVersion
- app.Compiled = ctx.App.Compiled
- app.Author = ctx.App.Author
- app.Email = ctx.App.Email
- app.Writer = ctx.App.Writer
-
- app.categories = CommandCategories{}
- for _, command := range c.Subcommands {
- app.categories = app.categories.AddCommand(command.Category, command)
- }
-
- sort.Sort(app.categories)
-
- // bash completion
- app.EnableBashCompletion = ctx.App.EnableBashCompletion
- if c.BashComplete != nil {
- app.BashComplete = c.BashComplete
- }
-
- // set the actions
- app.Before = c.Before
- app.After = c.After
- if c.Action != nil {
- app.Action = c.Action
- } else {
- app.Action = helpSubcommand.Action
- }
-
- for index, cc := range app.Commands {
- app.Commands[index].commandNamePath = []string{c.Name, cc.Name}
- }
-
- return app.RunAsSubcommand(ctx)
-}
-
-// VisibleFlags returns a slice of the Flags with Hidden=false
-func (c Command) VisibleFlags() []Flag {
- return visibleFlags(c.Flags)
-}
diff --git a/vendor/src/github.com/codegangsta/cli/command_test.go b/vendor/src/github.com/codegangsta/cli/command_test.go
deleted file mode 100644
index 5e0e8de..0000000
--- a/vendor/src/github.com/codegangsta/cli/command_test.go
+++ /dev/null
@@ -1,155 +0,0 @@
-package cli
-
-import (
- "errors"
- "flag"
- "fmt"
- "io/ioutil"
- "strings"
- "testing"
-)
-
-func TestCommandFlagParsing(t *testing.T) {
- cases := []struct {
- testArgs []string
- skipFlagParsing bool
- skipArgReorder bool
- expectedErr error
- }{
- // Test normal "not ignoring flags" flow
- {[]string{"test-cmd", "blah", "blah", "-break"}, false, false, errors.New("flag provided but not defined: -break")},
-
- // Test no arg reorder
- {[]string{"test-cmd", "blah", "blah", "-break"}, false, true, nil},
-
- {[]string{"test-cmd", "blah", "blah"}, true, false, nil}, // Test SkipFlagParsing without any args that look like flags
- {[]string{"test-cmd", "blah", "-break"}, true, false, nil}, // Test SkipFlagParsing with random flag arg
- {[]string{"test-cmd", "blah", "-help"}, true, false, nil}, // Test SkipFlagParsing with "special" help flag arg
- }
-
- for _, c := range cases {
- app := NewApp()
- app.Writer = ioutil.Discard
- set := flag.NewFlagSet("test", 0)
- set.Parse(c.testArgs)
-
- context := NewContext(app, set, nil)
-
- command := Command{
- Name: "test-cmd",
- Aliases: []string{"tc"},
- Usage: "this is for testing",
- Description: "testing",
- Action: func(_ *Context) error { return nil },
- SkipFlagParsing: c.skipFlagParsing,
- SkipArgReorder: c.skipArgReorder,
- }
-
- err := command.Run(context)
-
- expect(t, err, c.expectedErr)
- expect(t, []string(context.Args()), c.testArgs)
- }
-}
-
-func TestCommand_Run_DoesNotOverwriteErrorFromBefore(t *testing.T) {
- app := NewApp()
- app.Commands = []Command{
- {
- Name: "bar",
- Before: func(c *Context) error {
- return fmt.Errorf("before error")
- },
- After: func(c *Context) error {
- return fmt.Errorf("after error")
- },
- },
- }
-
- err := app.Run([]string{"foo", "bar"})
- if err == nil {
- t.Fatalf("expected to receive error from Run, got none")
- }
-
- if !strings.Contains(err.Error(), "before error") {
- t.Errorf("expected text of error from Before method, but got none in \"%v\"", err)
- }
- if !strings.Contains(err.Error(), "after error") {
- t.Errorf("expected text of error from After method, but got none in \"%v\"", err)
- }
-}
-
-func TestCommand_Run_BeforeSavesMetadata(t *testing.T) {
- var receivedMsgFromAction string
- var receivedMsgFromAfter string
-
- app := NewApp()
- app.Commands = []Command{
- {
- Name: "bar",
- Before: func(c *Context) error {
- c.App.Metadata["msg"] = "hello world"
- return nil
- },
- Action: func(c *Context) error {
- msg, ok := c.App.Metadata["msg"]
- if !ok {
- return errors.New("msg not found")
- }
- receivedMsgFromAction = msg.(string)
- return nil
- },
- After: func(c *Context) error {
- msg, ok := c.App.Metadata["msg"]
- if !ok {
- return errors.New("msg not found")
- }
- receivedMsgFromAfter = msg.(string)
- return nil
- },
- },
- }
-
- err := app.Run([]string{"foo", "bar"})
- if err != nil {
- t.Fatalf("expected no error from Run, got %s", err)
- }
-
- expectedMsg := "hello world"
-
- if receivedMsgFromAction != expectedMsg {
- t.Fatalf("expected msg from Action to match. Given: %q\nExpected: %q",
- receivedMsgFromAction, expectedMsg)
- }
- if receivedMsgFromAfter != expectedMsg {
- t.Fatalf("expected msg from After to match. Given: %q\nExpected: %q",
- receivedMsgFromAction, expectedMsg)
- }
-}
-
-func TestCommand_OnUsageError_WithWrongFlagValue(t *testing.T) {
- app := NewApp()
- app.Commands = []Command{
- {
- Name: "bar",
- Flags: []Flag{
- IntFlag{Name: "flag"},
- },
- OnUsageError: func(c *Context, err error, _ bool) error {
- if !strings.HasPrefix(err.Error(), "invalid value \"wrong\"") {
- t.Errorf("Expect an invalid value error, but got \"%v\"", err)
- }
- return errors.New("intercepted: " + err.Error())
- },
- },
- }
-
- err := app.Run([]string{"foo", "bar", "--flag=wrong"})
- if err == nil {
- t.Fatalf("expected to receive error from Run, got none")
- }
-
- if !strings.HasPrefix(err.Error(), "intercepted: invalid value") {
- t.Errorf("Expect an intercepted error, but got \"%v\"", err)
- }
-}
diff --git a/vendor/src/github.com/codegangsta/cli/context.go b/vendor/src/github.com/codegangsta/cli/context.go
deleted file mode 100644
index 492a742..0000000
--- a/vendor/src/github.com/codegangsta/cli/context.go
+++ /dev/null
@@ -1,264 +0,0 @@
-package cli
-
-import (
- "errors"
- "flag"
- "os"
- "reflect"
- "strings"
-)
-
-// Context is a type that is passed through to
-// each Handler action in a cli application. Context
-// can be used to retrieve context-specific Args and
-// parsed command-line options.
-type Context struct {
- App *App
- Command Command
- flagSet *flag.FlagSet
- setFlags map[string]bool
- parentContext *Context
-}
-
-// NewContext creates a new context. For use in when invoking an App or Command action.
-func NewContext(app *App, set *flag.FlagSet, parentCtx *Context) *Context {
- return &Context{App: app, flagSet: set, parentContext: parentCtx}
-}
-
-// NumFlags returns the number of flags set
-func (c *Context) NumFlags() int {
- return c.flagSet.NFlag()
-}
-
-// Set sets a context flag to a value.
-func (c *Context) Set(name, value string) error {
- return c.flagSet.Set(name, value)
-}
-
-// GlobalSet sets a context flag to a value on the global flagset
-func (c *Context) GlobalSet(name, value string) error {
- return globalContext(c).flagSet.Set(name, value)
-}
-
-// IsSet determines if the flag was actually set
-func (c *Context) IsSet(name string) bool {
- if c.setFlags == nil {
- c.setFlags = make(map[string]bool)
-
- c.flagSet.Visit(func(f *flag.Flag) {
- c.setFlags[f.Name] = true
- })
-
- c.flagSet.VisitAll(func(f *flag.Flag) {
- if _, ok := c.setFlags[f.Name]; ok {
- return
- }
- c.setFlags[f.Name] = false
- })
-
- // XXX hack to support IsSet for flags with EnvVar
- //
- // There isn't an easy way to do this with the current implementation since
- // whether a flag was set via an environment variable is very difficult to
- // determine here. Instead, we intend to introduce a backwards incompatible
- // change in version 2 to add `IsSet` to the Flag interface to push the
- // responsibility closer to where the information required to determine
- // whether a flag is set by non-standard means such as environment
- // variables is avaliable.
- //
- // See https://github.com/urfave/cli/issues/294 for additional discussion
- flags := c.Command.Flags
- if c.Command.Name == "" { // cannot == Command{} since it contains slice types
- if c.App != nil {
- flags = c.App.Flags
- }
- }
- for _, f := range flags {
- eachName(f.GetName(), func(name string) {
- if isSet, ok := c.setFlags[name]; isSet || !ok {
- return
- }
-
- val := reflect.ValueOf(f)
- if val.Kind() == reflect.Ptr {
- val = val.Elem()
- }
-
- envVarValue := val.FieldByName("EnvVar")
- if !envVarValue.IsValid() {
- return
- }
-
- eachName(envVarValue.String(), func(envVar string) {
- envVar = strings.TrimSpace(envVar)
- if envVal := os.Getenv(envVar); envVal != "" {
- c.setFlags[name] = true
- return
- }
- })
- })
- }
- }
-
- return c.setFlags[name]
-}
-
-// GlobalIsSet determines if the global flag was actually set
-func (c *Context) GlobalIsSet(name string) bool {
- ctx := c
- if ctx.parentContext != nil {
- ctx = ctx.parentContext
- }
-
- for ; ctx != nil; ctx = ctx.parentContext {
- if ctx.IsSet(name) {
- return true
- }
- }
- return false
-}
-
-// FlagNames returns a slice of flag names used in this context.
-func (c *Context) FlagNames() (names []string) {
- for _, flag := range c.Command.Flags {
- name := strings.Split(flag.GetName(), ",")[0]
- if name == "help" {
- continue
- }
- names = append(names, name)
- }
- return
-}
-
-// GlobalFlagNames returns a slice of global flag names used by the app.
-func (c *Context) GlobalFlagNames() (names []string) {
- for _, flag := range c.App.Flags {
- name := strings.Split(flag.GetName(), ",")[0]
- if name == "help" || name == "version" {
- continue
- }
- names = append(names, name)
- }
- return
-}
-
-// Parent returns the parent context, if any
-func (c *Context) Parent() *Context {
- return c.parentContext
-}
-
-// Args contains apps console arguments
-type Args []string
-
-// Args returns the command line arguments associated with the context.
-func (c *Context) Args() Args {
- args := Args(c.flagSet.Args())
- return args
-}
-
-// NArg returns the number of the command line arguments.
-func (c *Context) NArg() int {
- return len(c.Args())
-}
-
-// Get returns the nth argument, or else a blank string
-func (a Args) Get(n int) string {
- if len(a) > n {
- return a[n]
- }
- return ""
-}
-
-// First returns the first argument, or else a blank string
-func (a Args) First() string {
- return a.Get(0)
-}
-
-// Tail returns the rest of the arguments (not the first one)
-// or else an empty string slice
-func (a Args) Tail() []string {
- if len(a) >= 2 {
- return []string(a)[1:]
- }
- return []string{}
-}
-
-// Present checks if there are any arguments present
-func (a Args) Present() bool {
- return len(a) != 0
-}
-
-// Swap swaps arguments at the given indexes
-func (a Args) Swap(from, to int) error {
- if from >= len(a) || to >= len(a) {
- return errors.New("index out of range")
- }
- a[from], a[to] = a[to], a[from]
- return nil
-}
-
-func globalContext(ctx *Context) *Context {
- if ctx == nil {
- return nil
- }
-
- for {
- if ctx.parentContext == nil {
- return ctx
- }
- ctx = ctx.parentContext
- }
-}
-
-func lookupGlobalFlagSet(name string, ctx *Context) *flag.FlagSet {
- if ctx.parentContext != nil {
- ctx = ctx.parentContext
- }
- for ; ctx != nil; ctx = ctx.parentContext {
- if f := ctx.flagSet.Lookup(name); f != nil {
- return ctx.flagSet
- }
- }
- return nil
-}
-
-func copyFlag(name string, ff *flag.Flag, set *flag.FlagSet) {
- switch ff.Value.(type) {
- case *StringSlice:
- default:
- set.Set(name, ff.Value.String())
- }
-}
-
-func normalizeFlags(flags []Flag, set *flag.FlagSet) error {
- visited := make(map[string]bool)
- set.Visit(func(f *flag.Flag) {
- visited[f.Name] = true
- })
- for _, f := range flags {
- parts := strings.Split(f.GetName(), ",")
- if len(parts) == 1 {
- continue
- }
- var ff *flag.Flag
- for _, name := range parts {
- name = strings.Trim(name, " ")
- if visited[name] {
- if ff != nil {
- return errors.New("Cannot use two forms of the same flag: " + name + " " + ff.Name)
- }
- ff = set.Lookup(name)
- }
- }
- if ff == nil {
- continue
- }
- for _, name := range parts {
- name = strings.Trim(name, " ")
- if !visited[name] {
- copyFlag(name, ff, set)
- }
- }
- }
- return nil
-}
diff --git a/vendor/src/github.com/codegangsta/cli/context_test.go b/vendor/src/github.com/codegangsta/cli/context_test.go
deleted file mode 100644
index 0cf84d1..0000000
--- a/vendor/src/github.com/codegangsta/cli/context_test.go
+++ /dev/null
@@ -1,357 +0,0 @@
-package cli
-
-import (
- "flag"
- "os"
- "testing"
- "time"
-)
-
-func TestNewContext(t *testing.T) {
- set := flag.NewFlagSet("test", 0)
- set.Int("myflag", 12, "doc")
- set.Int64("myflagInt64", int64(12), "doc")
- set.Uint("myflagUint", uint(93), "doc")
- set.Uint64("myflagUint64", uint64(93), "doc")
- set.Float64("myflag64", float64(17), "doc")
- globalSet := flag.NewFlagSet("test", 0)
- globalSet.Int("myflag", 42, "doc")
- globalSet.Int64("myflagInt64", int64(42), "doc")
- globalSet.Uint("myflagUint", uint(33), "doc")
- globalSet.Uint64("myflagUint64", uint64(33), "doc")
- globalSet.Float64("myflag64", float64(47), "doc")
- globalCtx := NewContext(nil, globalSet, nil)
- command := Command{Name: "mycommand"}
- c := NewContext(nil, set, globalCtx)
- c.Command = command
- expect(t, c.Int("myflag"), 12)
- expect(t, c.Int64("myflagInt64"), int64(12))
- expect(t, c.Uint("myflagUint"), uint(93))
- expect(t, c.Uint64("myflagUint64"), uint64(93))
- expect(t, c.Float64("myflag64"), float64(17))
- expect(t, c.GlobalInt("myflag"), 42)
- expect(t, c.GlobalInt64("myflagInt64"), int64(42))
- expect(t, c.GlobalUint("myflagUint"), uint(33))
- expect(t, c.GlobalUint64("myflagUint64"), uint64(33))
- expect(t, c.GlobalFloat64("myflag64"), float64(47))
- expect(t, c.Command.Name, "mycommand")
-}
-
-func TestContext_Int(t *testing.T) {
- set := flag.NewFlagSet("test", 0)
- set.Int("myflag", 12, "doc")
- c := NewContext(nil, set, nil)
- expect(t, c.Int("myflag"), 12)
-}
-
-func TestContext_Int64(t *testing.T) {
- set := flag.NewFlagSet("test", 0)
- set.Int64("myflagInt64", 12, "doc")
- c := NewContext(nil, set, nil)
- expect(t, c.Int64("myflagInt64"), int64(12))
-}
-
-func TestContext_Uint(t *testing.T) {
- set := flag.NewFlagSet("test", 0)
- set.Uint("myflagUint", uint(13), "doc")
- c := NewContext(nil, set, nil)
- expect(t, c.Uint("myflagUint"), uint(13))
-}
-
-func TestContext_Uint64(t *testing.T) {
- set := flag.NewFlagSet("test", 0)
- set.Uint64("myflagUint64", uint64(9), "doc")
- c := NewContext(nil, set, nil)
- expect(t, c.Uint64("myflagUint64"), uint64(9))
-}
-
-func TestContext_GlobalInt(t *testing.T) {
- set := flag.NewFlagSet("test", 0)
- set.Int("myflag", 12, "doc")
- c := NewContext(nil, set, nil)
- expect(t, c.GlobalInt("myflag"), 12)
- expect(t, c.GlobalInt("nope"), 0)
-}
-
-func TestContext_GlobalInt64(t *testing.T) {
- set := flag.NewFlagSet("test", 0)
- set.Int64("myflagInt64", 12, "doc")
- c := NewContext(nil, set, nil)
- expect(t, c.GlobalInt64("myflagInt64"), int64(12))
- expect(t, c.GlobalInt64("nope"), int64(0))
-}
-
-func TestContext_Float64(t *testing.T) {
- set := flag.NewFlagSet("test", 0)
- set.Float64("myflag", float64(17), "doc")
- c := NewContext(nil, set, nil)
- expect(t, c.Float64("myflag"), float64(17))
-}
-
-func TestContext_GlobalFloat64(t *testing.T) {
- set := flag.NewFlagSet("test", 0)
- set.Float64("myflag", float64(17), "doc")
- c := NewContext(nil, set, nil)
- expect(t, c.GlobalFloat64("myflag"), float64(17))
- expect(t, c.GlobalFloat64("nope"), float64(0))
-}
-
-func TestContext_Duration(t *testing.T) {
- set := flag.NewFlagSet("test", 0)
- set.Duration("myflag", time.Duration(12*time.Second), "doc")
- c := NewContext(nil, set, nil)
- expect(t, c.Duration("myflag"), time.Duration(12*time.Second))
-}
-
-func TestContext_String(t *testing.T) {
- set := flag.NewFlagSet("test", 0)
- set.String("myflag", "hello world", "doc")
- c := NewContext(nil, set, nil)
- expect(t, c.String("myflag"), "hello world")
-}
-
-func TestContext_Bool(t *testing.T) {
- set := flag.NewFlagSet("test", 0)
- set.Bool("myflag", false, "doc")
- c := NewContext(nil, set, nil)
- expect(t, c.Bool("myflag"), false)
-}
-
-func TestContext_BoolT(t *testing.T) {
- set := flag.NewFlagSet("test", 0)
- set.Bool("myflag", true, "doc")
- c := NewContext(nil, set, nil)
- expect(t, c.BoolT("myflag"), true)
-}
-
-func TestContext_GlobalBool(t *testing.T) {
- set := flag.NewFlagSet("test", 0)
-
- globalSet := flag.NewFlagSet("test-global", 0)
- globalSet.Bool("myflag", false, "doc")
- globalCtx := NewContext(nil, globalSet, nil)
-
- c := NewContext(nil, set, globalCtx)
- expect(t, c.GlobalBool("myflag"), false)
- expect(t, c.GlobalBool("nope"), false)
-}
-
-func TestContext_GlobalBoolT(t *testing.T) {
- set := flag.NewFlagSet("test", 0)
-
- globalSet := flag.NewFlagSet("test-global", 0)
- globalSet.Bool("myflag", true, "doc")
- globalCtx := NewContext(nil, globalSet, nil)
-
- c := NewContext(nil, set, globalCtx)
- expect(t, c.GlobalBoolT("myflag"), true)
- expect(t, c.GlobalBoolT("nope"), false)
-}
-
-func TestContext_Args(t *testing.T) {
- set := flag.NewFlagSet("test", 0)
- set.Bool("myflag", false, "doc")
- c := NewContext(nil, set, nil)
- set.Parse([]string{"--myflag", "bat", "baz"})
- expect(t, len(c.Args()), 2)
- expect(t, c.Bool("myflag"), true)
-}
-
-func TestContext_NArg(t *testing.T) {
- set := flag.NewFlagSet("test", 0)
- set.Bool("myflag", false, "doc")
- c := NewContext(nil, set, nil)
- set.Parse([]string{"--myflag", "bat", "baz"})
- expect(t, c.NArg(), 2)
-}
-
-func TestContext_IsSet(t *testing.T) {
- set := flag.NewFlagSet("test", 0)
- set.Bool("myflag", false, "doc")
- set.String("otherflag", "hello world", "doc")
- globalSet := flag.NewFlagSet("test", 0)
- globalSet.Bool("myflagGlobal", true, "doc")
- globalCtx := NewContext(nil, globalSet, nil)
- c := NewContext(nil, set, globalCtx)
- set.Parse([]string{"--myflag", "bat", "baz"})
- globalSet.Parse([]string{"--myflagGlobal", "bat", "baz"})
- expect(t, c.IsSet("myflag"), true)
- expect(t, c.IsSet("otherflag"), false)
- expect(t, c.IsSet("bogusflag"), false)
- expect(t, c.IsSet("myflagGlobal"), false)
-}
-
-// XXX Corresponds to hack in context.IsSet for flags with EnvVar field
-// Should be moved to `flag_test` in v2
-func TestContext_IsSet_fromEnv(t *testing.T) {
- var timeoutIsSet, tIsSet, noEnvVarIsSet, nIsSet bool
-
- os.Clearenv()
- os.Setenv("APP_TIMEOUT_SECONDS", "15.5")
- a := App{
- Flags: []Flag{
- Float64Flag{Name: "timeout, t", EnvVar: "APP_TIMEOUT_SECONDS"},
- Float64Flag{Name: "no-env-var, n"},
- },
- Action: func(ctx *Context) error {
- timeoutIsSet = ctx.IsSet("timeout")
- tIsSet = ctx.IsSet("t")
- noEnvVarIsSet = ctx.IsSet("no-env-var")
- nIsSet = ctx.IsSet("n")
- return nil
- },
- }
- a.Run([]string{"run"})
- expect(t, timeoutIsSet, true)
- expect(t, tIsSet, true)
- expect(t, noEnvVarIsSet, false)
- expect(t, nIsSet, false)
-}
-
-func TestContext_GlobalIsSet(t *testing.T) {
- set := flag.NewFlagSet("test", 0)
- set.Bool("myflag", false, "doc")
- set.String("otherflag", "hello world", "doc")
- globalSet := flag.NewFlagSet("test", 0)
- globalSet.Bool("myflagGlobal", true, "doc")
- globalSet.Bool("myflagGlobalUnset", true, "doc")
- globalCtx := NewContext(nil, globalSet, nil)
- c := NewContext(nil, set, globalCtx)
- set.Parse([]string{"--myflag", "bat", "baz"})
- globalSet.Parse([]string{"--myflagGlobal", "bat", "baz"})
- expect(t, c.GlobalIsSet("myflag"), false)
- expect(t, c.GlobalIsSet("otherflag"), false)
- expect(t, c.GlobalIsSet("bogusflag"), false)
- expect(t, c.GlobalIsSet("myflagGlobal"), true)
- expect(t, c.GlobalIsSet("myflagGlobalUnset"), false)
- expect(t, c.GlobalIsSet("bogusGlobal"), false)
-}
-
-// XXX Corresponds to hack in context.IsSet for flags with EnvVar field
-// Should be moved to `flag_test` in v2
-func TestContext_GlobalIsSet_fromEnv(t *testing.T) {
- var timeoutIsSet, tIsSet, noEnvVarIsSet, nIsSet bool
-
- os.Clearenv()
- os.Setenv("APP_TIMEOUT_SECONDS", "15.5")
- a := App{
- Flags: []Flag{
- Float64Flag{Name: "timeout, t", EnvVar: "APP_TIMEOUT_SECONDS"},
- Float64Flag{Name: "no-env-var, n"},
- },
- Commands: []Command{
- {
- Name: "hello",
- Action: func(ctx *Context) error {
- timeoutIsSet = ctx.GlobalIsSet("timeout")
- tIsSet = ctx.GlobalIsSet("t")
- noEnvVarIsSet = ctx.GlobalIsSet("no-env-var")
- nIsSet = ctx.GlobalIsSet("n")
- return nil
- },
- },
- },
- }
- a.Run([]string{"run", "hello"})
- expect(t, timeoutIsSet, true)
- expect(t, tIsSet, true)
- expect(t, noEnvVarIsSet, false)
- expect(t, nIsSet, false)
-}
-
-func TestContext_NumFlags(t *testing.T) {
- set := flag.NewFlagSet("test", 0)
- set.Bool("myflag", false, "doc")
- set.String("otherflag", "hello world", "doc")
- globalSet := flag.NewFlagSet("test", 0)
- globalSet.Bool("myflagGlobal", true, "doc")
- globalCtx := NewContext(nil, globalSet, nil)
- c := NewContext(nil, set, globalCtx)
- set.Parse([]string{"--myflag", "--otherflag=foo"})
- globalSet.Parse([]string{"--myflagGlobal"})
- expect(t, c.NumFlags(), 2)
-}
-
-func TestContext_GlobalFlag(t *testing.T) {
- var globalFlag string
- var globalFlagSet bool
- app := NewApp()
- app.Flags = []Flag{
- StringFlag{Name: "global, g", Usage: "global"},
- }
- app.Action = func(c *Context) error {
- globalFlag = c.GlobalString("global")
- globalFlagSet = c.GlobalIsSet("global")
- return nil
- }
- app.Run([]string{"command", "-g", "foo"})
- expect(t, globalFlag, "foo")
- expect(t, globalFlagSet, true)
-
-}
-
-func TestContext_GlobalFlagsInSubcommands(t *testing.T) {
- subcommandRun := false
- parentFlag := false
- app := NewApp()
-
- app.Flags = []Flag{
- BoolFlag{Name: "debug, d", Usage: "Enable debugging"},
- }
-
- app.Commands = []Command{
- {
- Name: "foo",
- Flags: []Flag{
- BoolFlag{Name: "parent, p", Usage: "Parent flag"},
- },
- Subcommands: []Command{
- {
- Name: "bar",
- Action: func(c *Context) error {
- if c.GlobalBool("debug") {
- subcommandRun = true
- }
- if c.GlobalBool("parent") {
- parentFlag = true
- }
- return nil
- },
- },
- },
- },
- }
-
- app.Run([]string{"command", "-d", "foo", "-p", "bar"})
-
- expect(t, subcommandRun, true)
- expect(t, parentFlag, true)
-}
-
-func TestContext_Set(t *testing.T) {
- set := flag.NewFlagSet("test", 0)
- set.Int("int", 5, "an int")
- c := NewContext(nil, set, nil)
-
- c.Set("int", "1")
- expect(t, c.Int("int"), 1)
-}
-
-func TestContext_GlobalSet(t *testing.T) {
- gSet := flag.NewFlagSet("test", 0)
- gSet.Int("int", 5, "an int")
-
- set := flag.NewFlagSet("sub", 0)
- set.Int("int", 3, "an int")
-
- pc := NewContext(nil, gSet, nil)
- c := NewContext(nil, set, pc)
-
- c.Set("int", "1")
- expect(t, c.Int("int"), 1)
- expect(t, c.GlobalInt("int"), 5)
-
- c.GlobalSet("int", "1")
- expect(t, c.Int("int"), 1)
- expect(t, c.GlobalInt("int"), 1)
-}
diff --git a/vendor/src/github.com/codegangsta/cli/errors.go b/vendor/src/github.com/codegangsta/cli/errors.go
deleted file mode 100644
index c7d8c2f..0000000
--- a/vendor/src/github.com/codegangsta/cli/errors.go
+++ /dev/null
@@ -1,98 +0,0 @@
-package cli
-
-import (
- "fmt"
- "io"
- "os"
- "strings"
-)
-
-// OsExiter is the function used when the app exits. If not set defaults to os.Exit.
-var OsExiter = os.Exit
-
-// ErrWriter is used to write errors to the user. This can be anything
-// implementing the io.Writer interface and defaults to os.Stderr.
-var ErrWriter io.Writer = os.Stderr
-
-// MultiError is an error that wraps multiple errors.
-type MultiError struct {
- Errors []error
-}
-
-// NewMultiError creates a new MultiError. Pass in one or more errors.
-func NewMultiError(err ...error) MultiError {
- return MultiError{Errors: err}
-}
-
-// Error implents the error interface.
-func (m MultiError) Error() string {
- errs := make([]string, len(m.Errors))
- for i, err := range m.Errors {
- errs[i] = err.Error()
- }
-
- return strings.Join(errs, "\n")
-}
-
-// ExitCoder is the interface checked by `App` and `Command` for a custom exit
-// code
-type ExitCoder interface {
- error
- ExitCode() int
-}
-
-// ExitError fulfills both the builtin `error` interface and `ExitCoder`
-type ExitError struct {
- exitCode int
- message string
-}
-
-// NewExitError makes a new *ExitError
-func NewExitError(message string, exitCode int) *ExitError {
- return &ExitError{
- exitCode: exitCode,
- message: message,
- }
-}
-
-// Error returns the string message, fulfilling the interface required by
-// `error`
-func (ee *ExitError) Error() string {
- return ee.message
-}
-
-// ExitCode returns the exit code, fulfilling the interface required by
-// `ExitCoder`
-func (ee *ExitError) ExitCode() int {
- return ee.exitCode
-}
-
-// HandleExitCoder checks if the error fulfills the ExitCoder interface, and if
-// so prints the error to stderr (if it is non-empty) and calls OsExiter with the
-// given exit code. If the given error is a MultiError, then this func is
-// called on all members of the Errors slice.
-func HandleExitCoder(err error) {
- if err == nil {
- return
- }
-
- if exitErr, ok := err.(ExitCoder); ok {
- if err.Error() != "" {
- fmt.Fprintln(ErrWriter, err)
- }
- OsExiter(exitErr.ExitCode())
- return
- }
-
- if multiErr, ok := err.(MultiError); ok {
- for _, merr := range multiErr.Errors {
- HandleExitCoder(merr)
- }
- return
- }
-
- if err.Error() != "" {
- fmt.Fprintln(ErrWriter, err)
- }
- OsExiter(1)
-}
diff --git a/vendor/src/github.com/codegangsta/cli/errors_test.go b/vendor/src/github.com/codegangsta/cli/errors_test.go
deleted file mode 100644
index 04df031..0000000
--- a/vendor/src/github.com/codegangsta/cli/errors_test.go
+++ /dev/null
@@ -1,106 +0,0 @@
-package cli
-
-import (
- "bytes"
- "errors"
- "testing"
-)
-
-func TestHandleExitCoder_nil(t *testing.T) {
- exitCode := 0
- called := false
-
- OsExiter = func(rc int) {
- exitCode = rc
- called = true
- }
-
- defer func() { OsExiter = fakeOsExiter }()
-
- HandleExitCoder(nil)
-
- expect(t, exitCode, 0)
- expect(t, called, false)
-}
-
-func TestHandleExitCoder_ExitCoder(t *testing.T) {
- exitCode := 0
- called := false
-
- OsExiter = func(rc int) {
- exitCode = rc
- called = true
- }
-
- defer func() { OsExiter = fakeOsExiter }()
-
- HandleExitCoder(NewExitError("galactic perimeter breach", 9))
-
- expect(t, exitCode, 9)
- expect(t, called, true)
-}
-
-func TestHandleExitCoder_MultiErrorWithExitCoder(t *testing.T) {
- exitCode := 0
- called := false
-
- OsExiter = func(rc int) {
- exitCode = rc
- called = true
- }
-
- defer func() { OsExiter = fakeOsExiter }()
-
- exitErr := NewExitError("galactic perimeter breach", 9)
- err := NewMultiError(errors.New("wowsa"), errors.New("egad"), exitErr)
- HandleExitCoder(err)
-
- expect(t, exitCode, 9)
- expect(t, called, true)
-}
-
-func TestHandleExitCoder_ErrorWithMessage(t *testing.T) {
- exitCode := 0
- called := false
-
- OsExiter = func(rc int) {
- exitCode = rc
- called = true
- }
- ErrWriter = &bytes.Buffer{}
-
- defer func() {
- OsExiter = fakeOsExiter
- ErrWriter = fakeErrWriter
- }()
-
- err := errors.New("gourd havens")
- HandleExitCoder(err)
-
- expect(t, exitCode, 1)
- expect(t, called, true)
- expect(t, ErrWriter.(*bytes.Buffer).String(), "gourd havens\n")
-}
-
-func TestHandleExitCoder_ErrorWithoutMessage(t *testing.T) {
- exitCode := 0
- called := false
-
- OsExiter = func(rc int) {
- exitCode = rc
- called = true
- }
- ErrWriter = &bytes.Buffer{}
-
- defer func() {
- OsExiter = fakeOsExiter
- ErrWriter = fakeErrWriter
- }()
-
- err := errors.New("")
- HandleExitCoder(err)
-
- expect(t, exitCode, 1)
- expect(t, called, true)
- expect(t, ErrWriter.(*bytes.Buffer).String(), "")
-}
diff --git a/vendor/src/github.com/codegangsta/cli/flag-types.json b/vendor/src/github.com/codegangsta/cli/flag-types.json
deleted file mode 100644
index 1223107..0000000
--- a/vendor/src/github.com/codegangsta/cli/flag-types.json
+++ /dev/null
@@ -1,93 +0,0 @@
-[
- {
- "name": "Bool",
- "type": "bool",
- "value": false,
- "context_default": "false",
- "parser": "strconv.ParseBool(f.Value.String())"
- },
- {
- "name": "BoolT",
- "type": "bool",
- "value": false,
- "doctail": " that is true by default",
- "context_default": "false",
- "parser": "strconv.ParseBool(f.Value.String())"
- },
- {
- "name": "Duration",
- "type": "time.Duration",
- "doctail": " (see https://golang.org/pkg/time/#ParseDuration)",
- "context_default": "0",
- "parser": "time.ParseDuration(f.Value.String())"
- },
- {
- "name": "Float64",
- "type": "float64",
- "context_default": "0",
- "parser": "strconv.ParseFloat(f.Value.String(), 64)"
- },
- {
- "name": "Generic",
- "type": "Generic",
- "dest": false,
- "context_default": "nil",
- "context_type": "interface{}"
- },
- {
- "name": "Int64",
- "type": "int64",
- "context_default": "0",
- "parser": "strconv.ParseInt(f.Value.String(), 0, 64)"
- },
- {
- "name": "Int",
- "type": "int",
- "context_default": "0",
- "parser": "strconv.ParseInt(f.Value.String(), 0, 64)",
- "parser_cast": "int(parsed)"
- },
- {
- "name": "IntSlice",
- "type": "*IntSlice",
- "dest": false,
- "context_default": "nil",
- "context_type": "[]int",
- "parser": "(f.Value.(*IntSlice)).Value(), error(nil)"
- },
- {
- "name": "Int64Slice",
- "type": "*Int64Slice",
- "dest": false,
- "context_default": "nil",
- "context_type": "[]int64",
- "parser": "(f.Value.(*Int64Slice)).Value(), error(nil)"
- },
- {
- "name": "String",
- "type": "string",
- "context_default": "\"\"",
- "parser": "f.Value.String(), error(nil)"
- },
- {
- "name": "StringSlice",
- "type": "*StringSlice",
- "dest": false,
- "context_default": "nil",
- "context_type": "[]string",
- "parser": "(f.Value.(*StringSlice)).Value(), error(nil)"
- },
- {
- "name": "Uint64",
- "type": "uint64",
- "context_default": "0",
- "parser": "strconv.ParseUint(f.Value.String(), 0, 64)"
- },
- {
- "name": "Uint",
- "type": "uint",
- "context_default": "0",
- "parser": "strconv.ParseUint(f.Value.String(), 0, 64)",
- "parser_cast": "uint(parsed)"
- }
-]
diff --git a/vendor/src/github.com/codegangsta/cli/flag.go b/vendor/src/github.com/codegangsta/cli/flag.go
deleted file mode 100644
index e748c02..0000000
--- a/vendor/src/github.com/codegangsta/cli/flag.go
+++ /dev/null
@@ -1,621 +0,0 @@
-package cli
-
-import (
- "flag"
- "fmt"
- "os"
- "reflect"
- "runtime"
- "strconv"
- "strings"
- "time"
-)
-
-const defaultPlaceholder = "value"
-
-// BashCompletionFlag enables bash-completion for all commands and subcommands
-var BashCompletionFlag = BoolFlag{
- Name: "generate-bash-completion",
- Hidden: true,
-}
-
-// VersionFlag prints the version for the application
-var VersionFlag = BoolFlag{
- Name: "version, v",
- Usage: "print the version",
-}
-
-// HelpFlag prints the help for all commands and subcommands
-// Set to the zero value (BoolFlag{}) to disable flag -- keeps subcommand
-// unless HideHelp is set to true)
-var HelpFlag = BoolFlag{
- Name: "help, h",
- Usage: "show help",
-}
-
-// FlagStringer converts a flag definition to a string. This is used by help
-// to display a flag.
-var FlagStringer FlagStringFunc = stringifyFlag
-
-// Flag is a common interface related to parsing flags in cli.
-// For more advanced flag parsing techniques, it is recommended that
-// this interface be implemented.
-type Flag interface {
- fmt.Stringer
- // Apply Flag settings to the given flag set
- Apply(*flag.FlagSet)
- GetName() string
-}
-
-func flagSet(name string, flags []Flag) *flag.FlagSet {
- set := flag.NewFlagSet(name, flag.ContinueOnError)
-
- for _, f := range flags {
- f.Apply(set)
- }
- return set
-}
-
-func eachName(longName string, fn func(string)) {
- parts := strings.Split(longName, ",")
- for _, name := range parts {
- name = strings.Trim(name, " ")
- fn(name)
- }
-}
-
-// Generic is a generic parseable type identified by a specific flag
-type Generic interface {
- Set(value string) error
- String() string
-}
-
-// Apply takes the flagset and calls Set on the generic flag with the value
-// provided by the user for parsing by the flag
-func (f GenericFlag) Apply(set *flag.FlagSet) {
- val := f.Value
- if f.EnvVar != "" {
- for _, envVar := range strings.Split(f.EnvVar, ",") {
- envVar = strings.TrimSpace(envVar)
- if envVal := os.Getenv(envVar); envVal != "" {
- val.Set(envVal)
- break
- }
- }
- }
-
- eachName(f.Name, func(name string) {
- set.Var(f.Value, name, f.Usage)
- })
-}
-
-// StringSlice is an opaque type for []string to satisfy flag.Value
-type StringSlice []string
-
-// Set appends the string value to the list of values
-func (f *StringSlice) Set(value string) error {
- *f = append(*f, value)
- return nil
-}
-
-// String returns a readable representation of this value (for usage defaults)
-func (f *StringSlice) String() string {
- return fmt.Sprintf("%s", *f)
-}
-
-// Value returns the slice of strings set by this flag
-func (f *StringSlice) Value() []string {
- return *f
-}
-
-// Apply populates the flag given the flag set and environment
-func (f StringSliceFlag) Apply(set *flag.FlagSet) {
- if f.EnvVar != "" {
- for _, envVar := range strings.Split(f.EnvVar, ",") {
- envVar = strings.TrimSpace(envVar)
- if envVal := os.Getenv(envVar); envVal != "" {
- newVal := &StringSlice{}
- for _, s := range strings.Split(envVal, ",") {
- s = strings.TrimSpace(s)
- newVal.Set(s)
- }
- f.Value = newVal
- break
- }
- }
- }
-
- eachName(f.Name, func(name string) {
- if f.Value == nil {
- f.Value = &StringSlice{}
- }
- set.Var(f.Value, name, f.Usage)
- })
-}
-
-// IntSlice is an opaque type for []int to satisfy flag.Value
-type IntSlice []int
-
-// Set parses the value into an integer and appends it to the list of values
-func (f *IntSlice) Set(value string) error {
- tmp, err := strconv.Atoi(value)
- if err != nil {
- return err
- }
- *f = append(*f, tmp)
- return nil
-}
-
-// String returns a readable representation of this value (for usage defaults)
-func (f *IntSlice) String() string {
- return fmt.Sprintf("%#v", *f)
-}
-
-// Value returns the slice of ints set by this flag
-func (f *IntSlice) Value() []int {
- return *f
-}
-
-// Apply populates the flag given the flag set and environment
-func (f IntSliceFlag) Apply(set *flag.FlagSet) {
- if f.EnvVar != "" {
- for _, envVar := range strings.Split(f.EnvVar, ",") {
- envVar = strings.TrimSpace(envVar)
- if envVal := os.Getenv(envVar); envVal != "" {
- newVal := &IntSlice{}
- for _, s := range strings.Split(envVal, ",") {
- s = strings.TrimSpace(s)
- err := newVal.Set(s)
- if err != nil {
- fmt.Fprintf(ErrWriter, err.Error())
- }
- }
- f.Value = newVal
- break
- }
- }
- }
-
- eachName(f.Name, func(name string) {
- if f.Value == nil {
- f.Value = &IntSlice{}
- }
- set.Var(f.Value, name, f.Usage)
- })
-}
-
-// Int64Slice is an opaque type for []int to satisfy flag.Value
-type Int64Slice []int64
-
-// Set parses the value into an integer and appends it to the list of values
-func (f *Int64Slice) Set(value string) error {
- tmp, err := strconv.ParseInt(value, 10, 64)
- if err != nil {
- return err
- }
- *f = append(*f, tmp)
- return nil
-}
-
-// String returns a readable representation of this value (for usage defaults)
-func (f *Int64Slice) String() string {
- return fmt.Sprintf("%#v", *f)
-}
-
-// Value returns the slice of ints set by this flag
-func (f *Int64Slice) Value() []int64 {
- return *f
-}
-
-// Apply populates the flag given the flag set and environment
-func (f Int64SliceFlag) Apply(set *flag.FlagSet) {
- if f.EnvVar != "" {
- for _, envVar := range strings.Split(f.EnvVar, ",") {
- envVar = strings.TrimSpace(envVar)
- if envVal := os.Getenv(envVar); envVal != "" {
- newVal := &Int64Slice{}
- for _, s := range strings.Split(envVal, ",") {
- s = strings.TrimSpace(s)
- err := newVal.Set(s)
- if err != nil {
- fmt.Fprintf(ErrWriter, err.Error())
- }
- }
- f.Value = newVal
- break
- }
- }
- }
-
- eachName(f.Name, func(name string) {
- if f.Value == nil {
- f.Value = &Int64Slice{}
- }
- set.Var(f.Value, name, f.Usage)
- })
-}
-
-// Apply populates the flag given the flag set and environment
-func (f BoolFlag) Apply(set *flag.FlagSet) {
- val := false
- if f.EnvVar != "" {
- for _, envVar := range strings.Split(f.EnvVar, ",") {
- envVar = strings.TrimSpace(envVar)
- if envVal := os.Getenv(envVar); envVal != "" {
- envValBool, err := strconv.ParseBool(envVal)
- if err == nil {
- val = envValBool
- }
- break
- }
- }
- }
-
- eachName(f.Name, func(name string) {
- if f.Destination != nil {
- set.BoolVar(f.Destination, name, val, f.Usage)
- return
- }
- set.Bool(name, val, f.Usage)
- })
-}
-
-// Apply populates the flag given the flag set and environment
-func (f BoolTFlag) Apply(set *flag.FlagSet) {
- val := true
- if f.EnvVar != "" {
- for _, envVar := range strings.Split(f.EnvVar, ",") {
- envVar = strings.TrimSpace(envVar)
- if envVal := os.Getenv(envVar); envVal != "" {
- envValBool, err := strconv.ParseBool(envVal)
- if err == nil {
- val = envValBool
- break
- }
- }
- }
- }
-
- eachName(f.Name, func(name string) {
- if f.Destination != nil {
- set.BoolVar(f.Destination, name, val, f.Usage)
- return
- }
- set.Bool(name, val, f.Usage)
- })
-}
-
-// Apply populates the flag given the flag set and environment
-func (f StringFlag) Apply(set *flag.FlagSet) {
- if f.EnvVar != "" {
- for _, envVar := range strings.Split(f.EnvVar, ",") {
- envVar = strings.TrimSpace(envVar)
- if envVal := os.Getenv(envVar); envVal != "" {
- f.Value = envVal
- break
- }
- }
- }
-
- eachName(f.Name, func(name string) {
- if f.Destination != nil {
- set.StringVar(f.Destination, name, f.Value, f.Usage)
- return
- }
- set.String(name, f.Value, f.Usage)
- })
-}
-
-// Apply populates the flag given the flag set and environment
-func (f IntFlag) Apply(set *flag.FlagSet) {
- if f.EnvVar != "" {
- for _, envVar := range strings.Split(f.EnvVar, ",") {
- envVar = strings.TrimSpace(envVar)
- if envVal := os.Getenv(envVar); envVal != "" {
- envValInt, err := strconv.ParseInt(envVal, 0, 64)
- if err == nil {
- f.Value = int(envValInt)
- break
- }
- }
- }
- }
-
- eachName(f.Name, func(name string) {
- if f.Destination != nil {
- set.IntVar(f.Destination, name, f.Value, f.Usage)
- return
- }
- set.Int(name, f.Value, f.Usage)
- })
-}
-
-// Apply populates the flag given the flag set and environment
-func (f Int64Flag) Apply(set *flag.FlagSet) {
- if f.EnvVar != "" {
- for _, envVar := range strings.Split(f.EnvVar, ",") {
- envVar = strings.TrimSpace(envVar)
- if envVal := os.Getenv(envVar); envVal != "" {
- envValInt, err := strconv.ParseInt(envVal, 0, 64)
- if err == nil {
- f.Value = envValInt
- break
- }
- }
- }
- }
-
- eachName(f.Name, func(name string) {
- if f.Destination != nil {
- set.Int64Var(f.Destination, name, f.Value, f.Usage)
- return
- }
- set.Int64(name, f.Value, f.Usage)
- })
-}
-
-// Apply populates the flag given the flag set and environment
-func (f UintFlag) Apply(set *flag.FlagSet) {
- if f.EnvVar != "" {
- for _, envVar := range strings.Split(f.EnvVar, ",") {
- envVar = strings.TrimSpace(envVar)
- if envVal := os.Getenv(envVar); envVal != "" {
- envValInt, err := strconv.ParseUint(envVal, 0, 64)
- if err == nil {
- f.Value = uint(envValInt)
- break
- }
- }
- }
- }
-
- eachName(f.Name, func(name string) {
- if f.Destination != nil {
- set.UintVar(f.Destination, name, f.Value, f.Usage)
- return
- }
- set.Uint(name, f.Value, f.Usage)
- })
-}
-
-// Apply populates the flag given the flag set and environment
-func (f Uint64Flag) Apply(set *flag.FlagSet) {
- if f.EnvVar != "" {
- for _, envVar := range strings.Split(f.EnvVar, ",") {
- envVar = strings.TrimSpace(envVar)
- if envVal := os.Getenv(envVar); envVal != "" {
- envValInt, err := strconv.ParseUint(envVal, 0, 64)
- if err == nil {
- f.Value = uint64(envValInt)
- break
- }
- }
- }
- }
-
- eachName(f.Name, func(name string) {
- if f.Destination != nil {
- set.Uint64Var(f.Destination, name, f.Value, f.Usage)
- return
- }
- set.Uint64(name, f.Value, f.Usage)
- })
-}
-
-// Apply populates the flag given the flag set and environment
-func (f DurationFlag) Apply(set *flag.FlagSet) {
- if f.EnvVar != "" {
- for _, envVar := range strings.Split(f.EnvVar, ",") {
- envVar = strings.TrimSpace(envVar)
- if envVal := os.Getenv(envVar); envVal != "" {
- envValDuration, err := time.ParseDuration(envVal)
- if err == nil {
- f.Value = envValDuration
- break
- }
- }
- }
- }
-
- eachName(f.Name, func(name string) {
- if f.Destination != nil {
- set.DurationVar(f.Destination, name, f.Value, f.Usage)
- return
- }
- set.Duration(name, f.Value, f.Usage)
- })
-}
-
-// Apply populates the flag given the flag set and environment
-func (f Float64Flag) Apply(set *flag.FlagSet) {
- if f.EnvVar != "" {
- for _, envVar := range strings.Split(f.EnvVar, ",") {
- envVar = strings.TrimSpace(envVar)
- if envVal := os.Getenv(envVar); envVal != "" {
- envValFloat, err := strconv.ParseFloat(envVal, 10)
- if err == nil {
- f.Value = float64(envValFloat)
- }
- }
- }
- }
-
- eachName(f.Name, func(name string) {
- if f.Destination != nil {
- set.Float64Var(f.Destination, name, f.Value, f.Usage)
- return
- }
- set.Float64(name, f.Value, f.Usage)
- })
-}
-
-func visibleFlags(fl []Flag) []Flag {
- visible := []Flag{}
- for _, flag := range fl {
- if !flagValue(flag).FieldByName("Hidden").Bool() {
- visible = append(visible, flag)
- }
- }
- return visible
-}
-
-func prefixFor(name string) (prefix string) {
- if len(name) == 1 {
- prefix = "-"
- } else {
- prefix = "--"
- }
-
- return
-}
-
-// Returns the placeholder, if any, and the unquoted usage string.
-func unquoteUsage(usage string) (string, string) {
- for i := 0; i < len(usage); i++ {
- if usage[i] == '`' {
- for j := i + 1; j < len(usage); j++ {
- if usage[j] == '`' {
- name := usage[i+1 : j]
- usage = usage[:i] + name + usage[j+1:]
- return name, usage
- }
- }
- break
- }
- }
- return "", usage
-}
-
-func prefixedNames(fullName, placeholder string) string {
- var prefixed string
- parts := strings.Split(fullName, ",")
- for i, name := range parts {
- name = strings.Trim(name, " ")
- prefixed += prefixFor(name) + name
- if placeholder != "" {
- prefixed += " " + placeholder
- }
- if i < len(parts)-1 {
- prefixed += ", "
- }
- }
- return prefixed
-}
-
-func withEnvHint(envVar, str string) string {
- envText := ""
- if envVar != "" {
- prefix := "$"
- suffix := ""
- sep := ", $"
- if runtime.GOOS == "windows" {
- prefix = "%"
- suffix = "%"
- sep = "%, %"
- }
- envText = fmt.Sprintf(" [%s%s%s]", prefix, strings.Join(strings.Split(envVar, ","), sep), suffix)
- }
- return str + envText
-}
-
-func flagValue(f Flag) reflect.Value {
- fv := reflect.ValueOf(f)
- for fv.Kind() == reflect.Ptr {
- fv = reflect.Indirect(fv)
- }
- return fv
-}
-
-func stringifyFlag(f Flag) string {
- fv := flagValue(f)
-
- switch f.(type) {
- case IntSliceFlag:
- return withEnvHint(fv.FieldByName("EnvVar").String(),
- stringifyIntSliceFlag(f.(IntSliceFlag)))
- case Int64SliceFlag:
- return withEnvHint(fv.FieldByName("EnvVar").String(),
- stringifyInt64SliceFlag(f.(Int64SliceFlag)))
- case StringSliceFlag:
- return withEnvHint(fv.FieldByName("EnvVar").String(),
- stringifyStringSliceFlag(f.(StringSliceFlag)))
- }
-
- placeholder, usage := unquoteUsage(fv.FieldByName("Usage").String())
-
- needsPlaceholder := false
- defaultValueString := ""
- val := fv.FieldByName("Value")
-
- if val.IsValid() {
- needsPlaceholder = true
- defaultValueString = fmt.Sprintf(" (default: %v)", val.Interface())
-
- if val.Kind() == reflect.String && val.String() != "" {
- defaultValueString = fmt.Sprintf(" (default: %q)", val.String())
- }
- }
-
- if defaultValueString == " (default: )" {
- defaultValueString = ""
- }
-
- if needsPlaceholder && placeholder == "" {
- placeholder = defaultPlaceholder
- }
-
- usageWithDefault := strings.TrimSpace(fmt.Sprintf("%s%s", usage, defaultValueString))
-
- return withEnvHint(fv.FieldByName("EnvVar").String(),
- fmt.Sprintf("%s\t%s", prefixedNames(fv.FieldByName("Name").String(), placeholder), usageWithDefault))
-}
-
-func stringifyIntSliceFlag(f IntSliceFlag) string {
- defaultVals := []string{}
- if f.Value != nil && len(f.Value.Value()) > 0 {
- for _, i := range f.Value.Value() {
- defaultVals = append(defaultVals, fmt.Sprintf("%d", i))
- }
- }
-
- return stringifySliceFlag(f.Usage, f.Name, defaultVals)
-}
-
-func stringifyInt64SliceFlag(f Int64SliceFlag) string {
- defaultVals := []string{}
- if f.Value != nil && len(f.Value.Value()) > 0 {
- for _, i := range f.Value.Value() {
- defaultVals = append(defaultVals, fmt.Sprintf("%d", i))
- }
- }
-
- return stringifySliceFlag(f.Usage, f.Name, defaultVals)
-}
-
-func stringifyStringSliceFlag(f StringSliceFlag) string {
- defaultVals := []string{}
- if f.Value != nil && len(f.Value.Value()) > 0 {
- for _, s := range f.Value.Value() {
- if len(s) > 0 {
- defaultVals = append(defaultVals, fmt.Sprintf("%q", s))
- }
- }
- }
-
- return stringifySliceFlag(f.Usage, f.Name, defaultVals)
-}
-
-func stringifySliceFlag(usage, name string, defaultVals []string) string {
- placeholder, usage := unquoteUsage(usage)
- if placeholder == "" {
- placeholder = defaultPlaceholder
- }
-
- defaultVal := ""
- if len(defaultVals) > 0 {
- defaultVal = fmt.Sprintf(" (default: %s)", strings.Join(defaultVals, ", "))
- }
-
- usageWithDefault := strings.TrimSpace(fmt.Sprintf("%s%s", usage, defaultVal))
- return fmt.Sprintf("%s\t%s", prefixedNames(name, placeholder), usageWithDefault)
-}
diff --git a/vendor/src/github.com/codegangsta/cli/flag_generated.go b/vendor/src/github.com/codegangsta/cli/flag_generated.go
deleted file mode 100644
index 491b619..0000000
--- a/vendor/src/github.com/codegangsta/cli/flag_generated.go
+++ /dev/null
@@ -1,627 +0,0 @@
-package cli
-
-import (
- "flag"
- "strconv"
- "time"
-)
-
-// WARNING: This file is generated!
-
-// BoolFlag is a flag with type bool
-type BoolFlag struct {
- Name string
- Usage string
- EnvVar string
- Hidden bool
- Destination *bool
-}
-
-// String returns a readable representation of this value
-// (for usage defaults)
-func (f BoolFlag) String() string {
- return FlagStringer(f)
-}
-
-// GetName returns the name of the flag
-func (f BoolFlag) GetName() string {
- return f.Name
-}
-
-// Bool looks up the value of a local BoolFlag, returns
-// false if not found
-func (c *Context) Bool(name string) bool {
- return lookupBool(name, c.flagSet)
-}
-
-// GlobalBool looks up the value of a global BoolFlag, returns
-// false if not found
-func (c *Context) GlobalBool(name string) bool {
- if fs := lookupGlobalFlagSet(name, c); fs != nil {
- return lookupBool(name, fs)
- }
- return false
-}
-
-func lookupBool(name string, set *flag.FlagSet) bool {
- f := set.Lookup(name)
- if f != nil {
- parsed, err := strconv.ParseBool(f.Value.String())
- if err != nil {
- return false
- }
- return parsed
- }
- return false
-}
-
-// BoolTFlag is a flag with type bool that is true by default
-type BoolTFlag struct {
- Name string
- Usage string
- EnvVar string
- Hidden bool
- Destination *bool
-}
-
-// String returns a readable representation of this value
-// (for usage defaults)
-func (f BoolTFlag) String() string {
- return FlagStringer(f)
-}
-
-// GetName returns the name of the flag
-func (f BoolTFlag) GetName() string {
- return f.Name
-}
-
-// BoolT looks up the value of a local BoolTFlag, returns
-// false if not found
-func (c *Context) BoolT(name string) bool {
- return lookupBoolT(name, c.flagSet)
-}
-
-// GlobalBoolT looks up the value of a global BoolTFlag, returns
-// false if not found
-func (c *Context) GlobalBoolT(name string) bool {
- if fs := lookupGlobalFlagSet(name, c); fs != nil {
- return lookupBoolT(name, fs)
- }
- return false
-}
-
-func lookupBoolT(name string, set *flag.FlagSet) bool {
- f := set.Lookup(name)
- if f != nil {
- parsed, err := strconv.ParseBool(f.Value.String())
- if err != nil {
- return false
- }
- return parsed
- }
- return false
-}
-
-// DurationFlag is a flag with type time.Duration (see https://golang.org/pkg/time/#ParseDuration)
-type DurationFlag struct {
- Name string
- Usage string
- EnvVar string
- Hidden bool
- Value time.Duration
- Destination *time.Duration
-}
-
-// String returns a readable representation of this value
-// (for usage defaults)
-func (f DurationFlag) String() string {
- return FlagStringer(f)
-}
-
-// GetName returns the name of the flag
-func (f DurationFlag) GetName() string {
- return f.Name
-}
-
-// Duration looks up the value of a local DurationFlag, returns
-// 0 if not found
-func (c *Context) Duration(name string) time.Duration {
- return lookupDuration(name, c.flagSet)
-}
-
-// GlobalDuration looks up the value of a global DurationFlag, returns
-// 0 if not found
-func (c *Context) GlobalDuration(name string) time.Duration {
- if fs := lookupGlobalFlagSet(name, c); fs != nil {
- return lookupDuration(name, fs)
- }
- return 0
-}
-
-func lookupDuration(name string, set *flag.FlagSet) time.Duration {
- f := set.Lookup(name)
- if f != nil {
- parsed, err := time.ParseDuration(f.Value.String())
- if err != nil {
- return 0
- }
- return parsed
- }
- return 0
-}
-
-// Float64Flag is a flag with type float64
-type Float64Flag struct {
- Name string
- Usage string
- EnvVar string
- Hidden bool
- Value float64
- Destination *float64
-}
-
-// String returns a readable representation of this value
-// (for usage defaults)
-func (f Float64Flag) String() string {
- return FlagStringer(f)
-}
-
-// GetName returns the name of the flag
-func (f Float64Flag) GetName() string {
- return f.Name
-}
-
-// Float64 looks up the value of a local Float64Flag, returns
-// 0 if not found
-func (c *Context) Float64(name string) float64 {
- return lookupFloat64(name, c.flagSet)
-}
-
-// GlobalFloat64 looks up the value of a global Float64Flag, returns
-// 0 if not found
-func (c *Context) GlobalFloat64(name string) float64 {
- if fs := lookupGlobalFlagSet(name, c); fs != nil {
- return lookupFloat64(name, fs)
- }
- return 0
-}
-
-func lookupFloat64(name string, set *flag.FlagSet) float64 {
- f := set.Lookup(name)
- if f != nil {
- parsed, err := strconv.ParseFloat(f.Value.String(), 64)
- if err != nil {
- return 0
- }
- return parsed
- }
- return 0
-}
-
-// GenericFlag is a flag with type Generic
-type GenericFlag struct {
- Name string
- Usage string
- EnvVar string
- Hidden bool
- Value Generic
-}
-
-// String returns a readable representation of this value
-// (for usage defaults)
-func (f GenericFlag) String() string {
- return FlagStringer(f)
-}
-
-// GetName returns the name of the flag
-func (f GenericFlag) GetName() string {
- return f.Name
-}
-
-// Generic looks up the value of a local GenericFlag, returns
-// nil if not found
-func (c *Context) Generic(name string) interface{} {
- return lookupGeneric(name, c.flagSet)
-}
-
-// GlobalGeneric looks up the value of a global GenericFlag, returns
-// nil if not found
-func (c *Context) GlobalGeneric(name string) interface{} {
- if fs := lookupGlobalFlagSet(name, c); fs != nil {
- return lookupGeneric(name, fs)
- }
- return nil
-}
-
-func lookupGeneric(name string, set *flag.FlagSet) interface{} {
- f := set.Lookup(name)
- if f != nil {
- parsed, err := f.Value, error(nil)
- if err != nil {
- return nil
- }
- return parsed
- }
- return nil
-}
-
-// Int64Flag is a flag with type int64
-type Int64Flag struct {
- Name string
- Usage string
- EnvVar string
- Hidden bool
- Value int64
- Destination *int64
-}
-
-// String returns a readable representation of this value
-// (for usage defaults)
-func (f Int64Flag) String() string {
- return FlagStringer(f)
-}
-
-// GetName returns the name of the flag
-func (f Int64Flag) GetName() string {
- return f.Name
-}
-
-// Int64 looks up the value of a local Int64Flag, returns
-// 0 if not found
-func (c *Context) Int64(name string) int64 {
- return lookupInt64(name, c.flagSet)
-}
-
-// GlobalInt64 looks up the value of a global Int64Flag, returns
-// 0 if not found
-func (c *Context) GlobalInt64(name string) int64 {
- if fs := lookupGlobalFlagSet(name, c); fs != nil {
- return lookupInt64(name, fs)
- }
- return 0
-}
-
-func lookupInt64(name string, set *flag.FlagSet) int64 {
- f := set.Lookup(name)
- if f != nil {
- parsed, err := strconv.ParseInt(f.Value.String(), 0, 64)
- if err != nil {
- return 0
- }
- return parsed
- }
- return 0
-}
-
-// IntFlag is a flag with type int
-type IntFlag struct {
- Name string
- Usage string
- EnvVar string
- Hidden bool
- Value int
- Destination *int
-}
-
-// String returns a readable representation of this value
-// (for usage defaults)
-func (f IntFlag) String() string {
- return FlagStringer(f)
-}
-
-// GetName returns the name of the flag
-func (f IntFlag) GetName() string {
- return f.Name
-}
-
-// Int looks up the value of a local IntFlag, returns
-// 0 if not found
-func (c *Context) Int(name string) int {
- return lookupInt(name, c.flagSet)
-}
-
-// GlobalInt looks up the value of a global IntFlag, returns
-// 0 if not found
-func (c *Context) GlobalInt(name string) int {
- if fs := lookupGlobalFlagSet(name, c); fs != nil {
- return lookupInt(name, fs)
- }
- return 0
-}
-
-func lookupInt(name string, set *flag.FlagSet) int {
- f := set.Lookup(name)
- if f != nil {
- parsed, err := strconv.ParseInt(f.Value.String(), 0, 64)
- if err != nil {
- return 0
- }
- return int(parsed)
- }
- return 0
-}
-
-// IntSliceFlag is a flag with type *IntSlice
-type IntSliceFlag struct {
- Name string
- Usage string
- EnvVar string
- Hidden bool
- Value *IntSlice
-}
-
-// String returns a readable representation of this value
-// (for usage defaults)
-func (f IntSliceFlag) String() string {
- return FlagStringer(f)
-}
-
-// GetName returns the name of the flag
-func (f IntSliceFlag) GetName() string {
- return f.Name
-}
-
-// IntSlice looks up the value of a local IntSliceFlag, returns
-// nil if not found
-func (c *Context) IntSlice(name string) []int {
- return lookupIntSlice(name, c.flagSet)
-}
-
-// GlobalIntSlice looks up the value of a global IntSliceFlag, returns
-// nil if not found
-func (c *Context) GlobalIntSlice(name string) []int {
- if fs := lookupGlobalFlagSet(name, c); fs != nil {
- return lookupIntSlice(name, fs)
- }
- return nil
-}
-
-func lookupIntSlice(name string, set *flag.FlagSet) []int {
- f := set.Lookup(name)
- if f != nil {
- parsed, err := (f.Value.(*IntSlice)).Value(), error(nil)
- if err != nil {
- return nil
- }
- return parsed
- }
- return nil
-}
-
-// Int64SliceFlag is a flag with type *Int64Slice
-type Int64SliceFlag struct {
- Name string
- Usage string
- EnvVar string
- Hidden bool
- Value *Int64Slice
-}
-
-// String returns a readable representation of this value
-// (for usage defaults)
-func (f Int64SliceFlag) String() string {
- return FlagStringer(f)
-}
-
-// GetName returns the name of the flag
-func (f Int64SliceFlag) GetName() string {
- return f.Name
-}
-
-// Int64Slice looks up the value of a local Int64SliceFlag, returns
-// nil if not found
-func (c *Context) Int64Slice(name string) []int64 {
- return lookupInt64Slice(name, c.flagSet)
-}
-
-// GlobalInt64Slice looks up the value of a global Int64SliceFlag, returns
-// nil if not found
-func (c *Context) GlobalInt64Slice(name string) []int64 {
- if fs := lookupGlobalFlagSet(name, c); fs != nil {
- return lookupInt64Slice(name, fs)
- }
- return nil
-}
-
-func lookupInt64Slice(name string, set *flag.FlagSet) []int64 {
- f := set.Lookup(name)
- if f != nil {
- parsed, err := (f.Value.(*Int64Slice)).Value(), error(nil)
- if err != nil {
- return nil
- }
- return parsed
- }
- return nil
-}
-
-// StringFlag is a flag with type string
-type StringFlag struct {
- Name string
- Usage string
- EnvVar string
- Hidden bool
- Value string
- Destination *string
-}
-
-// String returns a readable representation of this value
-// (for usage defaults)
-func (f StringFlag) String() string {
- return FlagStringer(f)
-}
-
-// GetName returns the name of the flag
-func (f StringFlag) GetName() string {
- return f.Name
-}
-
-// String looks up the value of a local StringFlag, returns
-// "" if not found
-func (c *Context) String(name string) string {
- return lookupString(name, c.flagSet)
-}
-
-// GlobalString looks up the value of a global StringFlag, returns
-// "" if not found
-func (c *Context) GlobalString(name string) string {
- if fs := lookupGlobalFlagSet(name, c); fs != nil {
- return lookupString(name, fs)
- }
- return ""
-}
-
-func lookupString(name string, set *flag.FlagSet) string {
- f := set.Lookup(name)
- if f != nil {
- parsed, err := f.Value.String(), error(nil)
- if err != nil {
- return ""
- }
- return parsed
- }
- return ""
-}
-
-// StringSliceFlag is a flag with type *StringSlice
-type StringSliceFlag struct {
- Name string
- Usage string
- EnvVar string
- Hidden bool
- Value *StringSlice
-}
-
-// String returns a readable representation of this value
-// (for usage defaults)
-func (f StringSliceFlag) String() string {
- return FlagStringer(f)
-}
-
-// GetName returns the name of the flag
-func (f StringSliceFlag) GetName() string {
- return f.Name
-}
-
-// StringSlice looks up the value of a local StringSliceFlag, returns
-// nil if not found
-func (c *Context) StringSlice(name string) []string {
- return lookupStringSlice(name, c.flagSet)
-}
-
-// GlobalStringSlice looks up the value of a global StringSliceFlag, returns
-// nil if not found
-func (c *Context) GlobalStringSlice(name string) []string {
- if fs := lookupGlobalFlagSet(name, c); fs != nil {
- return lookupStringSlice(name, fs)
- }
- return nil
-}
-
-func lookupStringSlice(name string, set *flag.FlagSet) []string {
- f := set.Lookup(name)
- if f != nil {
- parsed, err := (f.Value.(*StringSlice)).Value(), error(nil)
- if err != nil {
- return nil
- }
- return parsed
- }
- return nil
-}
-
-// Uint64Flag is a flag with type uint64
-type Uint64Flag struct {
- Name string
- Usage string
- EnvVar string
- Hidden bool
- Value uint64
- Destination *uint64
-}
-
-// String returns a readable representation of this value
-// (for usage defaults)
-func (f Uint64Flag) String() string {
- return FlagStringer(f)
-}
-
-// GetName returns the name of the flag
-func (f Uint64Flag) GetName() string {
- return f.Name
-}
-
-// Uint64 looks up the value of a local Uint64Flag, returns
-// 0 if not found
-func (c *Context) Uint64(name string) uint64 {
- return lookupUint64(name, c.flagSet)
-}
-
-// GlobalUint64 looks up the value of a global Uint64Flag, returns
-// 0 if not found
-func (c *Context) GlobalUint64(name string) uint64 {
- if fs := lookupGlobalFlagSet(name, c); fs != nil {
- return lookupUint64(name, fs)
- }
- return 0
-}
-
-func lookupUint64(name string, set *flag.FlagSet) uint64 {
- f := set.Lookup(name)
- if f != nil {
- parsed, err := strconv.ParseUint(f.Value.String(), 0, 64)
- if err != nil {
- return 0
- }
- return parsed
- }
- return 0
-}
-
-// UintFlag is a flag with type uint
-type UintFlag struct {
- Name string
- Usage string
- EnvVar string
- Hidden bool
- Value uint
- Destination *uint
-}
-
-// String returns a readable representation of this value
-// (for usage defaults)
-func (f UintFlag) String() string {
- return FlagStringer(f)
-}
-
-// GetName returns the name of the flag
-func (f UintFlag) GetName() string {
- return f.Name
-}
-
-// Uint looks up the value of a local UintFlag, returns
-// 0 if not found
-func (c *Context) Uint(name string) uint {
- return lookupUint(name, c.flagSet)
-}
-
-// GlobalUint looks up the value of a global UintFlag, returns
-// 0 if not found
-func (c *Context) GlobalUint(name string) uint {
- if fs := lookupGlobalFlagSet(name, c); fs != nil {
- return lookupUint(name, fs)
- }
- return 0
-}
-
-func lookupUint(name string, set *flag.FlagSet) uint {
- f := set.Lookup(name)
- if f != nil {
- parsed, err := strconv.ParseUint(f.Value.String(), 0, 64)
- if err != nil {
- return 0
- }
- return uint(parsed)
- }
- return 0
-}
diff --git a/vendor/src/github.com/codegangsta/cli/flag_test.go b/vendor/src/github.com/codegangsta/cli/flag_test.go
deleted file mode 100644
index a7afcc4..0000000
--- a/vendor/src/github.com/codegangsta/cli/flag_test.go
+++ /dev/null
@@ -1,1092 +0,0 @@
-package cli
-
-import (
- "fmt"
- "os"
- "reflect"
- "runtime"
- "strings"
- "testing"
- "time"
-)
-
-var boolFlagTests = []struct {
- name string
- expected string
-}{
- {"help", "--help\t"},
- {"h", "-h\t"},
-}
-
-func TestBoolFlagHelpOutput(t *testing.T) {
- for _, test := range boolFlagTests {
- flag := BoolFlag{Name: test.name}
- output := flag.String()
-
- if output != test.expected {
- t.Errorf("%q does not match %q", output, test.expected)
- }
- }
-}
-
-var stringFlagTests = []struct {
- name string
- usage string
- value string
- expected string
-}{
- {"foo", "", "", "--foo value\t"},
- {"f", "", "", "-f value\t"},
- {"f", "The total `foo` desired", "all", "-f foo\tThe total foo desired (default: \"all\")"},
- {"test", "", "Something", "--test value\t(default: \"Something\")"},
- {"config,c", "Load configuration from `FILE`", "", "--config FILE, -c FILE\tLoad configuration from FILE"},
- {"config,c", "Load configuration from `CONFIG`", "config.json", "--config CONFIG, -c CONFIG\tLoad configuration from CONFIG (default: \"config.json\")"},
-}
-
-func TestStringFlagHelpOutput(t *testing.T) {
- for _, test := range stringFlagTests {
- flag := StringFlag{Name: test.name, Usage: test.usage, Value: test.value}
- output := flag.String()
-
- if output != test.expected {
- t.Errorf("%q does not match %q", output, test.expected)
- }
- }
-}
-
-func TestStringFlagWithEnvVarHelpOutput(t *testing.T) {
- os.Clearenv()
- os.Setenv("APP_FOO", "derp")
- for _, test := range stringFlagTests {
- flag := StringFlag{Name: test.name, Value: test.value, EnvVar: "APP_FOO"}
- output := flag.String()
-
- expectedSuffix := " [$APP_FOO]"
- if runtime.GOOS == "windows" {
- expectedSuffix = " [%APP_FOO%]"
- }
- if !strings.HasSuffix(output, expectedSuffix) {
- t.Errorf("%s does not end with"+expectedSuffix, output)
- }
- }
-}
-
-var stringSliceFlagTests = []struct {
- name string
- value *StringSlice
- expected string
-}{
- {"foo", func() *StringSlice {
- s := &StringSlice{}
- s.Set("")
- return s
- }(), "--foo value\t"},
- {"f", func() *StringSlice {
- s := &StringSlice{}
- s.Set("")
- return s
- }(), "-f value\t"},
- {"f", func() *StringSlice {
- s := &StringSlice{}
- s.Set("Lipstick")
- return s
- }(), "-f value\t(default: \"Lipstick\")"},
- {"test", func() *StringSlice {
- s := &StringSlice{}
- s.Set("Something")
- return s
- }(), "--test value\t(default: \"Something\")"},
-}
-
-func TestStringSliceFlagHelpOutput(t *testing.T) {
- for _, test := range stringSliceFlagTests {
- flag := StringSliceFlag{Name: test.name, Value: test.value}
- output := flag.String()
-
- if output != test.expected {
- t.Errorf("%q does not match %q", output, test.expected)
- }
- }
-}
-
-func TestStringSliceFlagWithEnvVarHelpOutput(t *testing.T) {
- os.Clearenv()
- os.Setenv("APP_QWWX", "11,4")
- for _, test := range stringSliceFlagTests {
- flag := StringSliceFlag{Name: test.name, Value: test.value, EnvVar: "APP_QWWX"}
- output := flag.String()
-
- expectedSuffix := " [$APP_QWWX]"
- if runtime.GOOS == "windows" {
- expectedSuffix = " [%APP_QWWX%]"
- }
- if !strings.HasSuffix(output, expectedSuffix) {
- t.Errorf("%q does not end with"+expectedSuffix, output)
- }
- }
-}
-
-var intFlagTests = []struct {
- name string
- expected string
-}{
- {"hats", "--hats value\t(default: 9)"},
- {"H", "-H value\t(default: 9)"},
-}
-
-func TestIntFlagHelpOutput(t *testing.T) {
- for _, test := range intFlagTests {
- flag := IntFlag{Name: test.name, Value: 9}
- output := flag.String()
-
- if output != test.expected {
- t.Errorf("%s does not match %s", output, test.expected)
- }
- }
-}
-
-func TestIntFlagWithEnvVarHelpOutput(t *testing.T) {
- os.Clearenv()
- os.Setenv("APP_BAR", "2")
- for _, test := range intFlagTests {
- flag := IntFlag{Name: test.name, EnvVar: "APP_BAR"}
- output := flag.String()
-
- expectedSuffix := " [$APP_BAR]"
- if runtime.GOOS == "windows" {
- expectedSuffix = " [%APP_BAR%]"
- }
- if !strings.HasSuffix(output, expectedSuffix) {
- t.Errorf("%s does not end with"+expectedSuffix, output)
- }
- }
-}
-
-var int64FlagTests = []struct {
- name string
- expected string
-}{
- {"hats", "--hats value\t(default: 8589934592)"},
- {"H", "-H value\t(default: 8589934592)"},
-}
-
-func TestInt64FlagHelpOutput(t *testing.T) {
- for _, test := range int64FlagTests {
- flag := Int64Flag{Name: test.name, Value: 8589934592}
- output := flag.String()
-
- if output != test.expected {
- t.Errorf("%s does not match %s", output, test.expected)
- }
- }
-}
-
-func TestInt64FlagWithEnvVarHelpOutput(t *testing.T) {
- os.Clearenv()
- os.Setenv("APP_BAR", "2")
- for _, test := range int64FlagTests {
- flag := IntFlag{Name: test.name, EnvVar: "APP_BAR"}
- output := flag.String()
-
- expectedSuffix := " [$APP_BAR]"
- if runtime.GOOS == "windows" {
- expectedSuffix = " [%APP_BAR%]"
- }
- if !strings.HasSuffix(output, expectedSuffix) {
- t.Errorf("%s does not end with"+expectedSuffix, output)
- }
- }
-}
-
-var uintFlagTests = []struct {
- name string
- expected string
-}{
- {"nerfs", "--nerfs value\t(default: 41)"},
- {"N", "-N value\t(default: 41)"},
-}
-
-func TestUintFlagHelpOutput(t *testing.T) {
- for _, test := range uintFlagTests {
- flag := UintFlag{Name: test.name, Value: 41}
- output := flag.String()
-
- if output != test.expected {
- t.Errorf("%s does not match %s", output, test.expected)
- }
- }
-}
-
-func TestUintFlagWithEnvVarHelpOutput(t *testing.T) {
- os.Clearenv()
- os.Setenv("APP_BAR", "2")
- for _, test := range uintFlagTests {
- flag := UintFlag{Name: test.name, EnvVar: "APP_BAR"}
- output := flag.String()
-
- expectedSuffix := " [$APP_BAR]"
- if runtime.GOOS == "windows" {
- expectedSuffix = " [%APP_BAR%]"
- }
- if !strings.HasSuffix(output, expectedSuffix) {
- t.Errorf("%s does not end with"+expectedSuffix, output)
- }
- }
-}
-
-var uint64FlagTests = []struct {
- name string
- expected string
-}{
- {"gerfs", "--gerfs value\t(default: 8589934582)"},
- {"G", "-G value\t(default: 8589934582)"},
-}
-
-func TestUint64FlagHelpOutput(t *testing.T) {
- for _, test := range uint64FlagTests {
- flag := Uint64Flag{Name: test.name, Value: 8589934582}
- output := flag.String()
-
- if output != test.expected {
- t.Errorf("%s does not match %s", output, test.expected)
- }
- }
-}
-
-func TestUint64FlagWithEnvVarHelpOutput(t *testing.T) {
- os.Clearenv()
- os.Setenv("APP_BAR", "2")
- for _, test := range uint64FlagTests {
- flag := UintFlag{Name: test.name, EnvVar: "APP_BAR"}
- output := flag.String()
-
- expectedSuffix := " [$APP_BAR]"
- if runtime.GOOS == "windows" {
- expectedSuffix = " [%APP_BAR%]"
- }
- if !strings.HasSuffix(output, expectedSuffix) {
- t.Errorf("%s does not end with"+expectedSuffix, output)
- }
- }
-}
-
-var durationFlagTests = []struct {
- name string
- expected string
-}{
- {"hooting", "--hooting value\t(default: 1s)"},
- {"H", "-H value\t(default: 1s)"},
-}
-
-func TestDurationFlagHelpOutput(t *testing.T) {
- for _, test := range durationFlagTests {
- flag := DurationFlag{Name: test.name, Value: 1 * time.Second}
- output := flag.String()
-
- if output != test.expected {
- t.Errorf("%q does not match %q", output, test.expected)
- }
- }
-}
-
-func TestDurationFlagWithEnvVarHelpOutput(t *testing.T) {
- os.Clearenv()
- os.Setenv("APP_BAR", "2h3m6s")
- for _, test := range durationFlagTests {
- flag := DurationFlag{Name: test.name, EnvVar: "APP_BAR"}
- output := flag.String()
-
- expectedSuffix := " [$APP_BAR]"
- if runtime.GOOS == "windows" {
- expectedSuffix = " [%APP_BAR%]"
- }
- if !strings.HasSuffix(output, expectedSuffix) {
- t.Errorf("%s does not end with"+expectedSuffix, output)
- }
- }
-}
-
-var intSliceFlagTests = []struct {
- name string
- value *IntSlice
- expected string
-}{
- {"heads", &IntSlice{}, "--heads value\t"},
- {"H", &IntSlice{}, "-H value\t"},
- {"H, heads", func() *IntSlice {
- i := &IntSlice{}
- i.Set("9")
- i.Set("3")
- return i
- }(), "-H value, --heads value\t(default: 9, 3)"},
-}
-
-func TestIntSliceFlagHelpOutput(t *testing.T) {
- for _, test := range intSliceFlagTests {
- flag := IntSliceFlag{Name: test.name, Value: test.value}
- output := flag.String()
-
- if output != test.expected {
- t.Errorf("%q does not match %q", output, test.expected)
- }
- }
-}
-
-func TestIntSliceFlagWithEnvVarHelpOutput(t *testing.T) {
- os.Clearenv()
- os.Setenv("APP_SMURF", "42,3")
- for _, test := range intSliceFlagTests {
- flag := IntSliceFlag{Name: test.name, Value: test.value, EnvVar: "APP_SMURF"}
- output := flag.String()
-
- expectedSuffix := " [$APP_SMURF]"
- if runtime.GOOS == "windows" {
- expectedSuffix = " [%APP_SMURF%]"
- }
- if !strings.HasSuffix(output, expectedSuffix) {
- t.Errorf("%q does not end with"+expectedSuffix, output)
- }
- }
-}
-
-var int64SliceFlagTests = []struct {
- name string
- value *Int64Slice
- expected string
-}{
- {"heads", &Int64Slice{}, "--heads value\t"},
- {"H", &Int64Slice{}, "-H value\t"},
- {"H, heads", func() *Int64Slice {
- i := &Int64Slice{}
- i.Set("2")
- i.Set("17179869184")
- return i
- }(), "-H value, --heads value\t(default: 2, 17179869184)"},
-}
-
-func TestInt64SliceFlagHelpOutput(t *testing.T) {
- for _, test := range int64SliceFlagTests {
- flag := Int64SliceFlag{Name: test.name, Value: test.value}
- output := flag.String()
-
- if output != test.expected {
- t.Errorf("%q does not match %q", output, test.expected)
- }
- }
-}
-
-func TestInt64SliceFlagWithEnvVarHelpOutput(t *testing.T) {
- os.Clearenv()
- os.Setenv("APP_SMURF", "42,17179869184")
- for _, test := range int64SliceFlagTests {
- flag := Int64SliceFlag{Name: test.name, Value: test.value, EnvVar: "APP_SMURF"}
- output := flag.String()
-
- expectedSuffix := " [$APP_SMURF]"
- if runtime.GOOS == "windows" {
- expectedSuffix = " [%APP_SMURF%]"
- }
- if !strings.HasSuffix(output, expectedSuffix) {
- t.Errorf("%q does not end with"+expectedSuffix, output)
- }
- }
-}
-
-var float64FlagTests = []struct {
- name string
- expected string
-}{
- {"hooting", "--hooting value\t(default: 0.1)"},
- {"H", "-H value\t(default: 0.1)"},
-}
-
-func TestFloat64FlagHelpOutput(t *testing.T) {
- for _, test := range float64FlagTests {
- flag := Float64Flag{Name: test.name, Value: float64(0.1)}
- output := flag.String()
-
- if output != test.expected {
- t.Errorf("%q does not match %q", output, test.expected)
- }
- }
-}
-
-func TestFloat64FlagWithEnvVarHelpOutput(t *testing.T) {
- os.Clearenv()
- os.Setenv("APP_BAZ", "99.4")
- for _, test := range float64FlagTests {
- flag := Float64Flag{Name: test.name, EnvVar: "APP_BAZ"}
- output := flag.String()
-
- expectedSuffix := " [$APP_BAZ]"
- if runtime.GOOS == "windows" {
- expectedSuffix = " [%APP_BAZ%]"
- }
- if !strings.HasSuffix(output, expectedSuffix) {
- t.Errorf("%s does not end with"+expectedSuffix, output)
- }
- }
-}
-
-var genericFlagTests = []struct {
- name string
- value Generic
- expected string
-}{
- {"toads", &Parser{"abc", "def"}, "--toads value\ttest flag (default: abc,def)"},
- {"t", &Parser{"abc", "def"}, "-t value\ttest flag (default: abc,def)"},
-}
-
-func TestGenericFlagHelpOutput(t *testing.T) {
- for _, test := range genericFlagTests {
- flag := GenericFlag{Name: test.name, Value: test.value, Usage: "test flag"}
- output := flag.String()
-
- if output != test.expected {
- t.Errorf("%q does not match %q", output, test.expected)
- }
- }
-}
-
-func TestGenericFlagWithEnvVarHelpOutput(t *testing.T) {
- os.Clearenv()
- os.Setenv("APP_ZAP", "3")
- for _, test := range genericFlagTests {
- flag := GenericFlag{Name: test.name, EnvVar: "APP_ZAP"}
- output := flag.String()
-
- expectedSuffix := " [$APP_ZAP]"
- if runtime.GOOS == "windows" {
- expectedSuffix = " [%APP_ZAP%]"
- }
- if !strings.HasSuffix(output, expectedSuffix) {
- t.Errorf("%s does not end with"+expectedSuffix, output)
- }
- }
-}
-
-func TestParseMultiString(t *testing.T) {
- (&App{
- Flags: []Flag{
- StringFlag{Name: "serve, s"},
- },
- Action: func(ctx *Context) error {
- if ctx.String("serve") != "10" {
- t.Errorf("main name not set")
- }
- if ctx.String("s") != "10" {
- t.Errorf("short name not set")
- }
- return nil
- },
- }).Run([]string{"run", "-s", "10"})
-}
-
-func TestParseDestinationString(t *testing.T) {
- var dest string
- a := App{
- Flags: []Flag{
- StringFlag{
- Name: "dest",
- Destination: &dest,
- },
- },
- Action: func(ctx *Context) error {
- if dest != "10" {
- t.Errorf("expected destination String 10")
- }
- return nil
- },
- }
- a.Run([]string{"run", "--dest", "10"})
-}
-
-func TestParseMultiStringFromEnv(t *testing.T) {
- os.Clearenv()
- os.Setenv("APP_COUNT", "20")
- (&App{
- Flags: []Flag{
- StringFlag{Name: "count, c", EnvVar: "APP_COUNT"},
- },
- Action: func(ctx *Context) error {
- if ctx.String("count") != "20" {
- t.Errorf("main name not set")
- }
- if ctx.String("c") != "20" {
- t.Errorf("short name not set")
- }
- return nil
- },
- }).Run([]string{"run"})
-}
-
-func TestParseMultiStringFromEnvCascade(t *testing.T) {
- os.Clearenv()
- os.Setenv("APP_COUNT", "20")
- (&App{
- Flags: []Flag{
- StringFlag{Name: "count, c", EnvVar: "COMPAT_COUNT,APP_COUNT"},
- },
- Action: func(ctx *Context) error {
- if ctx.String("count") != "20" {
- t.Errorf("main name not set")
- }
- if ctx.String("c") != "20" {
- t.Errorf("short name not set")
- }
- return nil
- },
- }).Run([]string{"run"})
-}
-
-func TestParseMultiStringSlice(t *testing.T) {
- (&App{
- Flags: []Flag{
- StringSliceFlag{Name: "serve, s", Value: &StringSlice{}},
- },
- Action: func(ctx *Context) error {
- if !reflect.DeepEqual(ctx.StringSlice("serve"), []string{"10", "20"}) {
- t.Errorf("main name not set")
- }
- if !reflect.DeepEqual(ctx.StringSlice("s"), []string{"10", "20"}) {
- t.Errorf("short name not set")
- }
- return nil
- },
- }).Run([]string{"run", "-s", "10", "-s", "20"})
-}
-
-func TestParseMultiStringSliceFromEnv(t *testing.T) {
- os.Clearenv()
- os.Setenv("APP_INTERVALS", "20,30,40")
-
- (&App{
- Flags: []Flag{
- StringSliceFlag{Name: "intervals, i", Value: &StringSlice{}, EnvVar: "APP_INTERVALS"},
- },
- Action: func(ctx *Context) error {
- if !reflect.DeepEqual(ctx.StringSlice("intervals"), []string{"20", "30", "40"}) {
- t.Errorf("main name not set from env")
- }
- if !reflect.DeepEqual(ctx.StringSlice("i"), []string{"20", "30", "40"}) {
- t.Errorf("short name not set from env")
- }
- return nil
- },
- }).Run([]string{"run"})
-}
-
-func TestParseMultiStringSliceFromEnvCascade(t *testing.T) {
- os.Clearenv()
- os.Setenv("APP_INTERVALS", "20,30,40")
-
- (&App{
- Flags: []Flag{
- StringSliceFlag{Name: "intervals, i", Value: &StringSlice{}, EnvVar: "COMPAT_INTERVALS,APP_INTERVALS"},
- },
- Action: func(ctx *Context) error {
- if !reflect.DeepEqual(ctx.StringSlice("intervals"), []string{"20", "30", "40"}) {
- t.Errorf("main name not set from env")
- }
- if !reflect.DeepEqual(ctx.StringSlice("i"), []string{"20", "30", "40"}) {
- t.Errorf("short name not set from env")
- }
- return nil
- },
- }).Run([]string{"run"})
-}
-
-func TestParseMultiInt(t *testing.T) {
- a := App{
- Flags: []Flag{
- IntFlag{Name: "serve, s"},
- },
- Action: func(ctx *Context) error {
- if ctx.Int("serve") != 10 {
- t.Errorf("main name not set")
- }
- if ctx.Int("s") != 10 {
- t.Errorf("short name not set")
- }
- return nil
- },
- }
- a.Run([]string{"run", "-s", "10"})
-}
-
-func TestParseDestinationInt(t *testing.T) {
- var dest int
- a := App{
- Flags: []Flag{
- IntFlag{
- Name: "dest",
- Destination: &dest,
- },
- },
- Action: func(ctx *Context) error {
- if dest != 10 {
- t.Errorf("expected destination Int 10")
- }
- return nil
- },
- }
- a.Run([]string{"run", "--dest", "10"})
-}
-
-func TestParseMultiIntFromEnv(t *testing.T) {
- os.Clearenv()
- os.Setenv("APP_TIMEOUT_SECONDS", "10")
- a := App{
- Flags: []Flag{
- IntFlag{Name: "timeout, t", EnvVar: "APP_TIMEOUT_SECONDS"},
- },
- Action: func(ctx *Context) error {
- if ctx.Int("timeout") != 10 {
- t.Errorf("main name not set")
- }
- if ctx.Int("t") != 10 {
- t.Errorf("short name not set")
- }
- return nil
- },
- }
- a.Run([]string{"run"})
-}
-
-func TestParseMultiIntFromEnvCascade(t *testing.T) {
- os.Clearenv()
- os.Setenv("APP_TIMEOUT_SECONDS", "10")
- a := App{
- Flags: []Flag{
- IntFlag{Name: "timeout, t", EnvVar: "COMPAT_TIMEOUT_SECONDS,APP_TIMEOUT_SECONDS"},
- },
- Action: func(ctx *Context) error {
- if ctx.Int("timeout") != 10 {
- t.Errorf("main name not set")
- }
- if ctx.Int("t") != 10 {
- t.Errorf("short name not set")
- }
- return nil
- },
- }
- a.Run([]string{"run"})
-}
-
-func TestParseMultiIntSlice(t *testing.T) {
- (&App{
- Flags: []Flag{
- IntSliceFlag{Name: "serve, s", Value: &IntSlice{}},
- },
- Action: func(ctx *Context) error {
- if !reflect.DeepEqual(ctx.IntSlice("serve"), []int{10, 20}) {
- t.Errorf("main name not set")
- }
- if !reflect.DeepEqual(ctx.IntSlice("s"), []int{10, 20}) {
- t.Errorf("short name not set")
- }
- return nil
- },
- }).Run([]string{"run", "-s", "10", "-s", "20"})
-}
-
-func TestParseMultiIntSliceFromEnv(t *testing.T) {
- os.Clearenv()
- os.Setenv("APP_INTERVALS", "20,30,40")
-
- (&App{
- Flags: []Flag{
- IntSliceFlag{Name: "intervals, i", Value: &IntSlice{}, EnvVar: "APP_INTERVALS"},
- },
- Action: func(ctx *Context) error {
- if !reflect.DeepEqual(ctx.IntSlice("intervals"), []int{20, 30, 40}) {
- t.Errorf("main name not set from env")
- }
- if !reflect.DeepEqual(ctx.IntSlice("i"), []int{20, 30, 40}) {
- t.Errorf("short name not set from env")
- }
- return nil
- },
- }).Run([]string{"run"})
-}
-
-func TestParseMultiIntSliceFromEnvCascade(t *testing.T) {
- os.Clearenv()
- os.Setenv("APP_INTERVALS", "20,30,40")
-
- (&App{
- Flags: []Flag{
- IntSliceFlag{Name: "intervals, i", Value: &IntSlice{}, EnvVar: "COMPAT_INTERVALS,APP_INTERVALS"},
- },
- Action: func(ctx *Context) error {
- if !reflect.DeepEqual(ctx.IntSlice("intervals"), []int{20, 30, 40}) {
- t.Errorf("main name not set from env")
- }
- if !reflect.DeepEqual(ctx.IntSlice("i"), []int{20, 30, 40}) {
- t.Errorf("short name not set from env")
- }
- return nil
- },
- }).Run([]string{"run"})
-}
-
-func TestParseMultiInt64Slice(t *testing.T) {
- (&App{
- Flags: []Flag{
- Int64SliceFlag{Name: "serve, s", Value: &Int64Slice{}},
- },
- Action: func(ctx *Context) error {
- if !reflect.DeepEqual(ctx.Int64Slice("serve"), []int64{10, 17179869184}) {
- t.Errorf("main name not set")
- }
- if !reflect.DeepEqual(ctx.Int64Slice("s"), []int64{10, 17179869184}) {
- t.Errorf("short name not set")
- }
- return nil
- },
- }).Run([]string{"run", "-s", "10", "-s", "17179869184"})
-}
-
-func TestParseMultiInt64SliceFromEnv(t *testing.T) {
- os.Clearenv()
- os.Setenv("APP_INTERVALS", "20,30,17179869184")
-
- (&App{
- Flags: []Flag{
- Int64SliceFlag{Name: "intervals, i", Value: &Int64Slice{}, EnvVar: "APP_INTERVALS"},
- },
- Action: func(ctx *Context) error {
- if !reflect.DeepEqual(ctx.Int64Slice("intervals"), []int64{20, 30, 17179869184}) {
- t.Errorf("main name not set from env")
- }
- if !reflect.DeepEqual(ctx.Int64Slice("i"), []int64{20, 30, 17179869184}) {
- t.Errorf("short name not set from env")
- }
- return nil
- },
- }).Run([]string{"run"})
-}
-
-func TestParseMultiInt64SliceFromEnvCascade(t *testing.T) {
- os.Clearenv()
- os.Setenv("APP_INTERVALS", "20,30,17179869184")
-
- (&App{
- Flags: []Flag{
- Int64SliceFlag{Name: "intervals, i", Value: &Int64Slice{}, EnvVar: "COMPAT_INTERVALS,APP_INTERVALS"},
- },
- Action: func(ctx *Context) error {
- if !reflect.DeepEqual(ctx.Int64Slice("intervals"), []int64{20, 30, 17179869184}) {
- t.Errorf("main name not set from env")
- }
- if !reflect.DeepEqual(ctx.Int64Slice("i"), []int64{20, 30, 17179869184}) {
- t.Errorf("short name not set from env")
- }
- return nil
- },
- }).Run([]string{"run"})
-}
-
-func TestParseMultiFloat64(t *testing.T) {
- a := App{
- Flags: []Flag{
- Float64Flag{Name: "serve, s"},
- },
- Action: func(ctx *Context) error {
- if ctx.Float64("serve") != 10.2 {
- t.Errorf("main name not set")
- }
- if ctx.Float64("s") != 10.2 {
- t.Errorf("short name not set")
- }
- return nil
- },
- }
- a.Run([]string{"run", "-s", "10.2"})
-}
-
-func TestParseDestinationFloat64(t *testing.T) {
- var dest float64
- a := App{
- Flags: []Flag{
- Float64Flag{
- Name: "dest",
- Destination: &dest,
- },
- },
- Action: func(ctx *Context) error {
- if dest != 10.2 {
- t.Errorf("expected destination Float64 10.2")
- }
- return nil
- },
- }
- a.Run([]string{"run", "--dest", "10.2"})
-}
-
-func TestParseMultiFloat64FromEnv(t *testing.T) {
- os.Clearenv()
- os.Setenv("APP_TIMEOUT_SECONDS", "15.5")
- a := App{
- Flags: []Flag{
- Float64Flag{Name: "timeout, t", EnvVar: "APP_TIMEOUT_SECONDS"},
- },
- Action: func(ctx *Context) error {
- if ctx.Float64("timeout") != 15.5 {
- t.Errorf("main name not set")
- }
- if ctx.Float64("t") != 15.5 {
- t.Errorf("short name not set")
- }
- return nil
- },
- }
- a.Run([]string{"run"})
-}
-
-func TestParseMultiFloat64FromEnvCascade(t *testing.T) {
- os.Clearenv()
- os.Setenv("APP_TIMEOUT_SECONDS", "15.5")
- a := App{
- Flags: []Flag{
- Float64Flag{Name: "timeout, t", EnvVar: "COMPAT_TIMEOUT_SECONDS,APP_TIMEOUT_SECONDS"},
- },
- Action: func(ctx *Context) error {
- if ctx.Float64("timeout") != 15.5 {
- t.Errorf("main name not set")
- }
- if ctx.Float64("t") != 15.5 {
- t.Errorf("short name not set")
- }
- return nil
- },
- }
- a.Run([]string{"run"})
-}
-
-func TestParseMultiBool(t *testing.T) {
- a := App{
- Flags: []Flag{
- BoolFlag{Name: "serve, s"},
- },
- Action: func(ctx *Context) error {
- if ctx.Bool("serve") != true {
- t.Errorf("main name not set")
- }
- if ctx.Bool("s") != true {
- t.Errorf("short name not set")
- }
- return nil
- },
- }
- a.Run([]string{"run", "--serve"})
-}
-
-func TestParseDestinationBool(t *testing.T) {
- var dest bool
- a := App{
- Flags: []Flag{
- BoolFlag{
- Name: "dest",
- Destination: &dest,
- },
- },
- Action: func(ctx *Context) error {
- if dest != true {
- t.Errorf("expected destination Bool true")
- }
- return nil
- },
- }
- a.Run([]string{"run", "--dest"})
-}
-
-func TestParseMultiBoolFromEnv(t *testing.T) {
- os.Clearenv()
- os.Setenv("APP_DEBUG", "1")
- a := App{
- Flags: []Flag{
- BoolFlag{Name: "debug, d", EnvVar: "APP_DEBUG"},
- },
- Action: func(ctx *Context) error {
- if ctx.Bool("debug") != true {
- t.Errorf("main name not set from env")
- }
- if ctx.Bool("d") != true {
- t.Errorf("short name not set from env")
- }
- return nil
- },
- }
- a.Run([]string{"run"})
-}
-
-func TestParseMultiBoolFromEnvCascade(t *testing.T) {
- os.Clearenv()
- os.Setenv("APP_DEBUG", "1")
- a := App{
- Flags: []Flag{
- BoolFlag{Name: "debug, d", EnvVar: "COMPAT_DEBUG,APP_DEBUG"},
- },
- Action: func(ctx *Context) error {
- if ctx.Bool("debug") != true {
- t.Errorf("main name not set from env")
- }
- if ctx.Bool("d") != true {
- t.Errorf("short name not set from env")
- }
- return nil
- },
- }
- a.Run([]string{"run"})
-}
-
-func TestParseMultiBoolT(t *testing.T) {
- a := App{
- Flags: []Flag{
- BoolTFlag{Name: "serve, s"},
- },
- Action: func(ctx *Context) error {
- if ctx.BoolT("serve") != true {
- t.Errorf("main name not set")
- }
- if ctx.BoolT("s") != true {
- t.Errorf("short name not set")
- }
- return nil
- },
- }
- a.Run([]string{"run", "--serve"})
-}
-
-func TestParseDestinationBoolT(t *testing.T) {
- var dest bool
- a := App{
- Flags: []Flag{
- BoolTFlag{
- Name: "dest",
- Destination: &dest,
- },
- },
- Action: func(ctx *Context) error {
- if dest != true {
- t.Errorf("expected destination BoolT true")
- }
- return nil
- },
- }
- a.Run([]string{"run", "--dest"})
-}
-
-func TestParseMultiBoolTFromEnv(t *testing.T) {
- os.Clearenv()
- os.Setenv("APP_DEBUG", "0")
- a := App{
- Flags: []Flag{
- BoolTFlag{Name: "debug, d", EnvVar: "APP_DEBUG"},
- },
- Action: func(ctx *Context) error {
- if ctx.BoolT("debug") != false {
- t.Errorf("main name not set from env")
- }
- if ctx.BoolT("d") != false {
- t.Errorf("short name not set from env")
- }
- return nil
- },
- }
- a.Run([]string{"run"})
-}
-
-func TestParseMultiBoolTFromEnvCascade(t *testing.T) {
- os.Clearenv()
- os.Setenv("APP_DEBUG", "0")
- a := App{
- Flags: []Flag{
- BoolTFlag{Name: "debug, d", EnvVar: "COMPAT_DEBUG,APP_DEBUG"},
- },
- Action: func(ctx *Context) error {
- if ctx.BoolT("debug") != false {
- t.Errorf("main name not set from env")
- }
- if ctx.BoolT("d") != false {
- t.Errorf("short name not set from env")
- }
- return nil
- },
- }
- a.Run([]string{"run"})
-}
-
-type Parser [2]string
-
-func (p *Parser) Set(value string) error {
- parts := strings.Split(value, ",")
- if len(parts) != 2 {
- return fmt.Errorf("invalid format")
- }
-
- (*p)[0] = parts[0]
- (*p)[1] = parts[1]
-
- return nil
-}
-
-func (p *Parser) String() string {
- return fmt.Sprintf("%s,%s", p[0], p[1])
-}
-
-func TestParseGeneric(t *testing.T) {
- a := App{
- Flags: []Flag{
- GenericFlag{Name: "serve, s", Value: &Parser{}},
- },
- Action: func(ctx *Context) error {
- if !reflect.DeepEqual(ctx.Generic("serve"), &Parser{"10", "20"}) {
- t.Errorf("main name not set")
- }
- if !reflect.DeepEqual(ctx.Generic("s"), &Parser{"10", "20"}) {
- t.Errorf("short name not set")
- }
- return nil
- },
- }
- a.Run([]string{"run", "-s", "10,20"})
-}
-
-func TestParseGenericFromEnv(t *testing.T) {
- os.Clearenv()
- os.Setenv("APP_SERVE", "20,30")
- a := App{
- Flags: []Flag{
- GenericFlag{Name: "serve, s", Value: &Parser{}, EnvVar: "APP_SERVE"},
- },
- Action: func(ctx *Context) error {
- if !reflect.DeepEqual(ctx.Generic("serve"), &Parser{"20", "30"}) {
- t.Errorf("main name not set from env")
- }
- if !reflect.DeepEqual(ctx.Generic("s"), &Parser{"20", "30"}) {
- t.Errorf("short name not set from env")
- }
- return nil
- },
- }
- a.Run([]string{"run"})
-}
-
-func TestParseGenericFromEnvCascade(t *testing.T) {
- os.Clearenv()
- os.Setenv("APP_FOO", "99,2000")
- a := App{
- Flags: []Flag{
- GenericFlag{Name: "foos", Value: &Parser{}, EnvVar: "COMPAT_FOO,APP_FOO"},
- },
- Action: func(ctx *Context) error {
- if !reflect.DeepEqual(ctx.Generic("foos"), &Parser{"99", "2000"}) {
- t.Errorf("value not set from env")
- }
- return nil
- },
- }
- a.Run([]string{"run"})
-}
diff --git a/vendor/src/github.com/codegangsta/cli/funcs.go b/vendor/src/github.com/codegangsta/cli/funcs.go
deleted file mode 100644
index cba5e6c..0000000
--- a/vendor/src/github.com/codegangsta/cli/funcs.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package cli
-
-// BashCompleteFunc is an action to execute when the bash-completion flag is set
-type BashCompleteFunc func(*Context)
-
-// BeforeFunc is an action to execute before any subcommands are run, but after
-// the context is ready if a non-nil error is returned, no subcommands are run
-type BeforeFunc func(*Context) error
-
-// AfterFunc is an action to execute after any subcommands are run, but after the
-// subcommand has finished it is run even if Action() panics
-type AfterFunc func(*Context) error
-
-// ActionFunc is the action to execute when no subcommands are specified
-type ActionFunc func(*Context) error
-
-// CommandNotFoundFunc is executed if the proper command cannot be found
-type CommandNotFoundFunc func(*Context, string)
-
-// OnUsageErrorFunc is executed if an usage error occurs. This is useful for displaying
-// customized usage error messages. This function is able to replace the
-// original error messages. If this function is not set, the "Incorrect usage"
-// is displayed and the execution is interrupted.
-type OnUsageErrorFunc func(context *Context, err error, isSubcommand bool) error
-
-// FlagStringFunc is used by the help generation to display a flag, which is
-// expected to be a single line.
-type FlagStringFunc func(Flag) string
diff --git a/vendor/src/github.com/codegangsta/cli/generate-flag-types b/vendor/src/github.com/codegangsta/cli/generate-flag-types
deleted file mode 100644
index 47a168b..0000000
--- a/vendor/src/github.com/codegangsta/cli/generate-flag-types
+++ /dev/null
@@ -1,248 +0,0 @@
-#!/usr/bin/env python
-"""
-The flag types that ship with the cli library have many things in common, and
-so we can take advantage of the `go generate` command to create much of the
-source code from a list of definitions. These definitions attempt to cover
-the parts that vary between flag types, and should evolve as needed.
-
-An example of the minimum definition needed is:
-
- {
- "name": "SomeType",
- "type": "sometype",
- "context_default": "nil"
- }
-
-In this example, the code generated for the `cli` package will include a type
-named `SomeTypeFlag` that is expected to wrap a value of type `sometype`.
-Fetching values by name via `*cli.Context` will default to a value of `nil`.
-
-A more complete, albeit somewhat redundant, example showing all available
-definition keys is:
-
- {
- "name": "VeryMuchType",
- "type": "*VeryMuchType",
- "value": true,
- "dest": false,
- "doctail": " which really only wraps a []float64, oh well!",
- "context_type": "[]float64",
- "context_default": "nil",
- "parser": "parseVeryMuchType(f.Value.String())",
- "parser_cast": "[]float64(parsed)"
- }
-
-The meaning of each field is as follows:
-
- name (string) - The type "name", which will be suffixed with
- `Flag` when generating the type definition
- for `cli` and the wrapper type for `altsrc`
- type (string) - The type that the generated `Flag` type for `cli`
- is expected to "contain" as its `.Value` member
- value (bool) - Should the generated `cli` type have a `Value`
- member?
- dest (bool) - Should the generated `cli` type support a
- destination pointer?
- doctail (string) - Additional docs for the `cli` flag type comment
- context_type (string) - The literal type used in the `*cli.Context`
- reader func signature
- context_default (string) - The literal value used as the default by the
- `*cli.Context` reader funcs when no value is
- present
- parser (string) - Literal code used to parse the flag `f`,
- expected to have a return signature of
- (value, error)
- parser_cast (string) - Literal code used to cast the `parsed` value
- returned from the `parser` code
-"""
-
-from __future__ import print_function, unicode_literals
-
-import argparse
-import json
-import os
-import subprocess
-import sys
-import tempfile
-import textwrap
-
-
-class _FancyFormatter(argparse.ArgumentDefaultsHelpFormatter,
- argparse.RawDescriptionHelpFormatter):
- pass
-
-
-def main(sysargs=sys.argv[:]):
- parser = argparse.ArgumentParser(
- description='Generate flag type code!',
- formatter_class=_FancyFormatter)
- parser.add_argument(
- 'package',
- type=str, default='cli', choices=_WRITEFUNCS.keys(),
- help='Package for which flag types will be generated'
- )
- parser.add_argument(
- '-i', '--in-json',
- type=argparse.FileType('r'),
- default=sys.stdin,
- help='Input JSON file which defines each type to be generated'
- )
- parser.add_argument(
- '-o', '--out-go',
- type=argparse.FileType('w'),
- default=sys.stdout,
- help='Output file/stream to which generated source will be written'
- )
- parser.epilog = __doc__
-
- args = parser.parse_args(sysargs[1:])
- _generate_flag_types(_WRITEFUNCS[args.package], args.out_go, args.in_json)
- return 0
-
-
-def _generate_flag_types(writefunc, output_go, input_json):
- types = json.load(input_json)
-
- tmp = tempfile.NamedTemporaryFile(suffix='.go', delete=False)
- writefunc(tmp, types)
- tmp.close()
-
- new_content = subprocess.check_output(
- ['goimports', tmp.name]
- ).decode('utf-8')
-
- print(new_content, file=output_go, end='')
- output_go.flush()
- os.remove(tmp.name)
-
-
-def _set_typedef_defaults(typedef):
- typedef.setdefault('doctail', '')
- typedef.setdefault('context_type', typedef['type'])
- typedef.setdefault('dest', True)
- typedef.setdefault('value', True)
- typedef.setdefault('parser', 'f.Value, error(nil)')
- typedef.setdefault('parser_cast', 'parsed')
-
-
-def _write_cli_flag_types(outfile, types):
- _fwrite(outfile, """\
- package cli
-
- // WARNING: This file is generated!
-
- """)
-
- for typedef in types:
- _set_typedef_defaults(typedef)
-
- _fwrite(outfile, """\
- // {name}Flag is a flag with type {type}{doctail}
- type {name}Flag struct {{
- Name string
- Usage string
- EnvVar string
- Hidden bool
- """.format(**typedef))
-
- if typedef['value']:
- _fwrite(outfile, """\
- Value {type}
- """.format(**typedef))
-
- if typedef['dest']:
- _fwrite(outfile, """\
- Destination *{type}
- """.format(**typedef))
-
- _fwrite(outfile, "\n}\n\n")
-
- _fwrite(outfile, """\
- // String returns a readable representation of this value
- // (for usage defaults)
- func (f {name}Flag) String() string {{
- return FlagStringer(f)
- }}
-
- // GetName returns the name of the flag
- func (f {name}Flag) GetName() string {{
- return f.Name
- }}
-
- // {name} looks up the value of a local {name}Flag, returns
- // {context_default} if not found
- func (c *Context) {name}(name string) {context_type} {{
- return lookup{name}(name, c.flagSet)
- }}
-
- // Global{name} looks up the value of a global {name}Flag, returns
- // {context_default} if not found
- func (c *Context) Global{name}(name string) {context_type} {{
- if fs := lookupGlobalFlagSet(name, c); fs != nil {{
- return lookup{name}(name, fs)
- }}
- return {context_default}
- }}
-
- func lookup{name}(name string, set *flag.FlagSet) {context_type} {{
- f := set.Lookup(name)
- if f != nil {{
- parsed, err := {parser}
- if err != nil {{
- return {context_default}
- }}
- return {parser_cast}
- }}
- return {context_default}
- }}
- """.format(**typedef))
-
-
-def _write_altsrc_flag_types(outfile, types):
- _fwrite(outfile, """\
- package altsrc
-
- import (
- "gopkg.in/urfave/cli.v1"
- )
-
- // WARNING: This file is generated!
-
- """)
-
- for typedef in types:
- _set_typedef_defaults(typedef)
-
- _fwrite(outfile, """\
- // {name}Flag is the flag type that wraps cli.{name}Flag to allow
- // for other values to be specified
- type {name}Flag struct {{
- cli.{name}Flag
- set *flag.FlagSet
- }}
-
- // New{name}Flag creates a new {name}Flag
- func New{name}Flag(fl cli.{name}Flag) *{name}Flag {{
- return &{name}Flag{{{name}Flag: fl, set: nil}}
- }}
-
- // Apply saves the flagSet for later usage calls, then calls the
- // wrapped {name}Flag.Apply
- func (f *{name}Flag) Apply(set *flag.FlagSet) {{
- f.set = set
- f.{name}Flag.Apply(set)
- }}
- """.format(**typedef))
-
-
-def _fwrite(outfile, text):
- print(textwrap.dedent(text), end='', file=outfile)
-
-
-_WRITEFUNCS = {
- 'cli': _write_cli_flag_types,
- 'altsrc': _write_altsrc_flag_types
-}
-
-if __name__ == '__main__':
- sys.exit(main())
diff --git a/vendor/src/github.com/codegangsta/cli/help.go b/vendor/src/github.com/codegangsta/cli/help.go
deleted file mode 100644
index ba34719..0000000
--- a/vendor/src/github.com/codegangsta/cli/help.go
+++ /dev/null
@@ -1,267 +0,0 @@
-package cli
-
-import (
- "fmt"
- "io"
- "os"
- "strings"
- "text/tabwriter"
- "text/template"
-)
-
-// AppHelpTemplate is the text template for the Default help topic.
-// cli.go uses text/template to render templates. You can
-// render custom help text by setting this variable.
-var AppHelpTemplate = `NAME:
- {{.Name}} - {{.Usage}}
-
-USAGE:
- {{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}} {{if .VisibleFlags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}
- {{if .Version}}{{if not .HideVersion}}
-VERSION:
- {{.Version}}
- {{end}}{{end}}{{if len .Authors}}
-AUTHOR(S):
- {{range .Authors}}{{.}}{{end}}
- {{end}}{{if .VisibleCommands}}
-COMMANDS:{{range .VisibleCategories}}{{if .Name}}
- {{.Name}}:{{end}}{{range .VisibleCommands}}
- {{join .Names ", "}}{{"\t"}}{{.Usage}}{{end}}
-{{end}}{{end}}{{if .VisibleFlags}}
-GLOBAL OPTIONS:
- {{range .VisibleFlags}}{{.}}
- {{end}}{{end}}{{if .Copyright}}
-COPYRIGHT:
- {{.Copyright}}
- {{end}}
-`
-
-// CommandHelpTemplate is the text template for the command help topic.
-// cli.go uses text/template to render templates. You can
-// render custom help text by setting this variable.
-var CommandHelpTemplate = `NAME:
- {{.HelpName}} - {{.Usage}}
-
-USAGE:
- {{.HelpName}}{{if .VisibleFlags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{if .Category}}
-
-CATEGORY:
- {{.Category}}{{end}}{{if .Description}}
-
-DESCRIPTION:
- {{.Description}}{{end}}{{if .VisibleFlags}}
-
-OPTIONS:
- {{range .VisibleFlags}}{{.}}
- {{end}}{{end}}
-`
-
-// SubcommandHelpTemplate is the text template for the subcommand help topic.
-// cli.go uses text/template to render templates. You can
-// render custom help text by setting this variable.
-var SubcommandHelpTemplate = `NAME:
- {{.HelpName}} - {{.Usage}}
-
-USAGE:
- {{.HelpName}} command{{if .VisibleFlags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}
-
-COMMANDS:{{range .VisibleCategories}}{{if .Name}}
- {{.Name}}:{{end}}{{range .VisibleCommands}}
- {{join .Names ", "}}{{"\t"}}{{.Usage}}{{end}}
-{{end}}{{if .VisibleFlags}}
-OPTIONS:
- {{range .VisibleFlags}}{{.}}
- {{end}}{{end}}
-`
-
-var helpCommand = Command{
- Name: "help",
- Aliases: []string{"h"},
- Usage: "Shows a list of commands or help for one command",
- ArgsUsage: "[command]",
- Action: func(c *Context) error {
- args := c.Args()
- if args.Present() {
- return ShowCommandHelp(c, args.First())
- }
-
- ShowAppHelp(c)
- return nil
- },
-}
-
-var helpSubcommand = Command{
- Name: "help",
- Aliases: []string{"h"},
- Usage: "Shows a list of commands or help for one command",
- ArgsUsage: "[command]",
- Action: func(c *Context) error {
- args := c.Args()
- if args.Present() {
- return ShowCommandHelp(c, args.First())
- }
-
- return ShowSubcommandHelp(c)
- },
-}
-
-// Prints help for the App or Command
-type helpPrinter func(w io.Writer, templ string, data interface{})
-
-// HelpPrinter is a function that writes the help output. If not set a default
-// is used. The function signature is:
-// func(w io.Writer, templ string, data interface{})
-var HelpPrinter helpPrinter = printHelp
-
-// VersionPrinter prints the version for the App
-var VersionPrinter = printVersion
-
-// ShowAppHelp is an action that displays the help.
-func ShowAppHelp(c *Context) error {
- HelpPrinter(c.App.Writer, AppHelpTemplate, c.App)
- return nil
-}
-
-// DefaultAppComplete prints the list of subcommands as the default app completion method
-func DefaultAppComplete(c *Context) {
- for _, command := range c.App.Commands {
- if command.Hidden {
- continue
- }
- for _, name := range command.Names() {
- fmt.Fprintln(c.App.Writer, name)
- }
- }
-}
-
-// ShowCommandHelp prints help for the given command
-func ShowCommandHelp(ctx *Context, command string) error {
- // show the subcommand help for a command with subcommands
- if command == "" {
- HelpPrinter(ctx.App.Writer, SubcommandHelpTemplate, ctx.App)
- return nil
- }
-
- for _, c := range ctx.App.Commands {
- if c.HasName(command) {
- HelpPrinter(ctx.App.Writer, CommandHelpTemplate, c)
- return nil
- }
- }
-
- if ctx.App.CommandNotFound == nil {
- return NewExitError(fmt.Sprintf("No help topic for '%v'", command), 3)
- }
-
- ctx.App.CommandNotFound(ctx, command)
- return nil
-}
-
-// ShowSubcommandHelp prints help for the given subcommand
-func ShowSubcommandHelp(c *Context) error {
- return ShowCommandHelp(c, c.Command.Name)
-}
-
-// ShowVersion prints the version number of the App
-func ShowVersion(c *Context) {
- VersionPrinter(c)
-}
-
-func printVersion(c *Context) {
- fmt.Fprintf(c.App.Writer, "%v version %v\n", c.App.Name, c.App.Version)
-}
-
-// ShowCompletions prints the lists of commands within a given context
-func ShowCompletions(c *Context) {
- a := c.App
- if a != nil && a.BashComplete != nil {
- a.BashComplete(c)
- }
-}
-
-// ShowCommandCompletions prints the custom completions for a given command
-func ShowCommandCompletions(ctx *Context, command string) {
- c := ctx.App.Command(command)
- if c != nil && c.BashComplete != nil {
- c.BashComplete(ctx)
- }
-}
-
-func printHelp(out io.Writer, templ string, data interface{}) {
- funcMap := template.FuncMap{
- "join": strings.Join,
- }
-
- w := tabwriter.NewWriter(out, 1, 8, 2, ' ', 0)
- t := template.Must(template.New("help").Funcs(funcMap).Parse(templ))
- err := t.Execute(w, data)
- if err != nil {
- // If the writer is closed, t.Execute will fail, and there's nothing
- // we can do to recover.
- if os.Getenv("CLI_TEMPLATE_ERROR_DEBUG") != "" {
- fmt.Fprintf(ErrWriter, "CLI TEMPLATE ERROR: %#v\n", err)
- }
- return
- }
- w.Flush()
-}
-
-func checkVersion(c *Context) bool {
- found := false
- if VersionFlag.Name != "" {
- eachName(VersionFlag.Name, func(name string) {
- if c.GlobalBool(name) || c.Bool(name) {
- found = true
- }
- })
- }
- return found
-}
-
-func checkHelp(c *Context) bool {
- found := false
- if HelpFlag.Name != "" {
- eachName(HelpFlag.Name, func(name string) {
- if c.GlobalBool(name) || c.Bool(name) {
- found = true
- }
- })
- }
- return found
-}
-
-func checkCommandHelp(c *Context, name string) bool {
- if c.Bool("h") || c.Bool("help") {
- ShowCommandHelp(c, name)
- return true
- }
-
- return false
-}
-
-func checkSubcommandHelp(c *Context) bool {
- if c.Bool("h") || c.Bool("help") {
- ShowSubcommandHelp(c)
- return true
- }
-
- return false
-}
-
-func checkCompletions(c *Context) bool {
- if (c.GlobalBool(BashCompletionFlag.Name) || c.Bool(BashCompletionFlag.Name)) && c.App.EnableBashCompletion {
- ShowCompletions(c)
- return true
- }
-
- return false
-}
-
-func checkCommandCompletions(c *Context, name string) bool {
- if c.Bool(BashCompletionFlag.Name) && c.App.EnableBashCompletion {
- ShowCommandCompletions(c, name)
- return true
- }
-
- return false
-}
diff --git a/vendor/src/github.com/codegangsta/cli/help_test.go b/vendor/src/github.com/codegangsta/cli/help_test.go
deleted file mode 100644
index 7c15400..0000000
--- a/vendor/src/github.com/codegangsta/cli/help_test.go
+++ /dev/null
@@ -1,289 +0,0 @@
-package cli
-
-import (
- "bytes"
- "flag"
- "strings"
- "testing"
-)
-
-func Test_ShowAppHelp_NoAuthor(t *testing.T) {
- output := new(bytes.Buffer)
- app := NewApp()
- app.Writer = output
-
- c := NewContext(app, nil, nil)
-
- ShowAppHelp(c)
-
- if bytes.Index(output.Bytes(), []byte("AUTHOR(S):")) != -1 {
- t.Errorf("expected\n%snot to include %s", output.String(), "AUTHOR(S):")
- }
-}
-
-func Test_ShowAppHelp_NoVersion(t *testing.T) {
- output := new(bytes.Buffer)
- app := NewApp()
- app.Writer = output
-
- app.Version = ""
-
- c := NewContext(app, nil, nil)
-
- ShowAppHelp(c)
-
- if bytes.Index(output.Bytes(), []byte("VERSION:")) != -1 {
- t.Errorf("expected\n%snot to include %s", output.String(), "VERSION:")
- }
-}
-
-func Test_ShowAppHelp_HideVersion(t *testing.T) {
- output := new(bytes.Buffer)
- app := NewApp()
- app.Writer = output
-
- app.HideVersion = true
-
- c := NewContext(app, nil, nil)
-
- ShowAppHelp(c)
-
- if bytes.Index(output.Bytes(), []byte("VERSION:")) != -1 {
- t.Errorf("expected\n%snot to include %s", output.String(), "VERSION:")
- }
-}
-
-func Test_Help_Custom_Flags(t *testing.T) {
- oldFlag := HelpFlag
- defer func() {
- HelpFlag = oldFlag
- }()
-
- HelpFlag = BoolFlag{
- Name: "help, x",
- Usage: "show help",
- }
-
- app := App{
- Flags: []Flag{
- BoolFlag{Name: "foo, h"},
- },
- Action: func(ctx *Context) error {
- if ctx.Bool("h") != true {
- t.Errorf("custom help flag not set")
- }
- return nil
- },
- }
- output := new(bytes.Buffer)
- app.Writer = output
- app.Run([]string{"test", "-h"})
- if output.Len() > 0 {
- t.Errorf("unexpected output: %s", output.String())
- }
-}
-
-func Test_Version_Custom_Flags(t *testing.T) {
- oldFlag := VersionFlag
- defer func() {
- VersionFlag = oldFlag
- }()
-
- VersionFlag = BoolFlag{
- Name: "version, V",
- Usage: "show version",
- }
-
- app := App{
- Flags: []Flag{
- BoolFlag{Name: "foo, v"},
- },
- Action: func(ctx *Context) error {
- if ctx.Bool("v") != true {
- t.Errorf("custom version flag not set")
- }
- return nil
- },
- }
- output := new(bytes.Buffer)
- app.Writer = output
- app.Run([]string{"test", "-v"})
- if output.Len() > 0 {
- t.Errorf("unexpected output: %s", output.String())
- }
-}
-
-func Test_helpCommand_Action_ErrorIfNoTopic(t *testing.T) {
- app := NewApp()
-
- set := flag.NewFlagSet("test", 0)
- set.Parse([]string{"foo"})
-
- c := NewContext(app, set, nil)
-
- err := helpCommand.Action.(func(*Context) error)(c)
-
- if err == nil {
- t.Fatalf("expected error from helpCommand.Action(), but got nil")
- }
-
- exitErr, ok := err.(*ExitError)
- if !ok {
- t.Fatalf("expected ExitError from helpCommand.Action(), but instead got: %v", err.Error())
- }
-
- if !strings.HasPrefix(exitErr.Error(), "No help topic for") {
- t.Fatalf("expected an unknown help topic error, but got: %v", exitErr.Error())
- }
-
- if exitErr.exitCode != 3 {
- t.Fatalf("expected exit value = 3, got %d instead", exitErr.exitCode)
- }
-}
-
-func Test_helpCommand_InHelpOutput(t *testing.T) {
- app := NewApp()
- output := &bytes.Buffer{}
- app.Writer = output
- app.Run([]string{"test", "--help"})
-
- s := output.String()
-
- if strings.Contains(s, "\nCOMMANDS:\nGLOBAL OPTIONS:\n") {
- t.Fatalf("empty COMMANDS section detected: %q", s)
- }
-
- if !strings.Contains(s, "help, h") {
- t.Fatalf("missing \"help, h\": %q", s)
- }
-}
-
-func Test_helpSubcommand_Action_ErrorIfNoTopic(t *testing.T) {
- app := NewApp()
-
- set := flag.NewFlagSet("test", 0)
- set.Parse([]string{"foo"})
-
- c := NewContext(app, set, nil)
-
- err := helpSubcommand.Action.(func(*Context) error)(c)
-
- if err == nil {
- t.Fatalf("expected error from helpCommand.Action(), but got nil")
- }
-
- exitErr, ok := err.(*ExitError)
- if !ok {
- t.Fatalf("expected ExitError from helpCommand.Action(), but instead got: %v", err.Error())
- }
-
- if !strings.HasPrefix(exitErr.Error(), "No help topic for") {
- t.Fatalf("expected an unknown help topic error, but got: %v", exitErr.Error())
- }
-
- if exitErr.exitCode != 3 {
- t.Fatalf("expected exit value = 3, got %d instead", exitErr.exitCode)
- }
-}
-
-func TestShowAppHelp_CommandAliases(t *testing.T) {
- app := &App{
- Commands: []Command{
- {
- Name: "frobbly",
- Aliases: []string{"fr", "frob"},
- Action: func(ctx *Context) error {
- return nil
- },
- },
- },
- }
-
- output := &bytes.Buffer{}
- app.Writer = output
- app.Run([]string{"foo", "--help"})
-
- if !strings.Contains(output.String(), "frobbly, fr, frob") {
- t.Errorf("expected output to include all command aliases; got: %q", output.String())
- }
-}
-
-func TestShowCommandHelp_CommandAliases(t *testing.T) {
- app := &App{
- Commands: []Command{
- {
- Name: "frobbly",
- Aliases: []string{"fr", "frob", "bork"},
- Action: func(ctx *Context) error {
- return nil
- },
- },
- },
- }
-
- output := &bytes.Buffer{}
- app.Writer = output
- app.Run([]string{"foo", "help", "fr"})
-
- if !strings.Contains(output.String(), "frobbly") {
- t.Errorf("expected output to include command name; got: %q", output.String())
- }
-
- if strings.Contains(output.String(), "bork") {
- t.Errorf("expected output to exclude command aliases; got: %q", output.String())
- }
-}
-
-func TestShowSubcommandHelp_CommandAliases(t *testing.T) {
- app := &App{
- Commands: []Command{
- {
- Name: "frobbly",
- Aliases: []string{"fr", "frob", "bork"},
- Action: func(ctx *Context) error {
- return nil
- },
- },
- },
- }
-
- output := &bytes.Buffer{}
- app.Writer = output
- app.Run([]string{"foo", "help"})
-
- if !strings.Contains(output.String(), "frobbly, fr, frob, bork") {
- t.Errorf("expected output to include all command aliases; got: %q", output.String())
- }
-}
-
-func TestShowAppHelp_HiddenCommand(t *testing.T) {
- app := &App{
- Commands: []Command{
- {
- Name: "frobbly",
- Action: func(ctx *Context) error {
- return nil
- },
- },
- {
- Name: "secretfrob",
- Hidden: true,
- Action: func(ctx *Context) error {
- return nil
- },
- },
- },
- }
-
- output := &bytes.Buffer{}
- app.Writer = output
- app.Run([]string{"app", "--help"})
-
- if strings.Contains(output.String(), "secretfrob") {
- t.Errorf("expected output to exclude \"secretfrob\"; got: %q", output.String())
- }
-
- if !strings.Contains(output.String(), "frobbly") {
- t.Errorf("expected output to include \"frobbly\"; got: %q", output.String())
- }
-}
diff --git a/vendor/src/github.com/codegangsta/cli/helpers_test.go b/vendor/src/github.com/codegangsta/cli/helpers_test.go
deleted file mode 100644
index 109ea7a..0000000
--- a/vendor/src/github.com/codegangsta/cli/helpers_test.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package cli
-
-import (
- "os"
- "reflect"
- "runtime"
- "strings"
- "testing"
-)
-
-var (
- wd, _ = os.Getwd()
-)
-
-func expect(t *testing.T, a interface{}, b interface{}) {
- _, fn, line, _ := runtime.Caller(1)
- fn = strings.Replace(fn, wd+"/", "", -1)
-
- if !reflect.DeepEqual(a, b) {
- t.Errorf("(%s:%d) Expected %v (type %v) - Got %v (type %v)", fn, line, b, reflect.TypeOf(b), a, reflect.TypeOf(a))
- }
-}
-
-func refute(t *testing.T, a interface{}, b interface{}) {
- if reflect.DeepEqual(a, b) {
- t.Errorf("Did not expect %v (type %v) - Got %v (type %v)", b, reflect.TypeOf(b), a, reflect.TypeOf(a))
- }
-}
diff --git a/vendor/src/github.com/codegangsta/cli/runtests b/vendor/src/github.com/codegangsta/cli/runtests
deleted file mode 100644
index ee22bde..0000000
--- a/vendor/src/github.com/codegangsta/cli/runtests
+++ /dev/null
@@ -1,122 +0,0 @@
-#!/usr/bin/env python
-from __future__ import print_function
-
-import argparse
-import os
-import sys
-import tempfile
-
-from subprocess import check_call, check_output
-
-
-PACKAGE_NAME = os.environ.get(
- 'CLI_PACKAGE_NAME', 'github.com/urfave/cli'
-)
-
-
-def main(sysargs=sys.argv[:]):
- targets = {
- 'vet': _vet,
- 'test': _test,
- 'gfmrun': _gfmrun,
- 'toc': _toc,
- 'gen': _gen,
- }
-
- parser = argparse.ArgumentParser()
- parser.add_argument(
- 'target', nargs='?', choices=tuple(targets.keys()), default='test'
- )
- args = parser.parse_args(sysargs[1:])
-
- targets[args.target]()
- return 0
-
-
-def _test():
- if check_output('go version'.split()).split()[2] < 'go1.2':
- _run('go test -v .')
- return
-
- coverprofiles = []
- for subpackage in ['', 'altsrc']:
- coverprofile = 'cli.coverprofile'
- if subpackage != '':
- coverprofile = '{}.coverprofile'.format(subpackage)
-
- coverprofiles.append(coverprofile)
-
- _run('go test -v'.split() + [
- '-coverprofile={}'.format(coverprofile),
- ('{}/{}'.format(PACKAGE_NAME, subpackage)).rstrip('/')
- ])
-
- combined_name = _combine_coverprofiles(coverprofiles)
- _run('go tool cover -func={}'.format(combined_name))
- os.remove(combined_name)
-
-
-def _gfmrun():
- go_version = check_output('go version'.split()).split()[2]
- if go_version < 'go1.3':
- print('runtests: skip on {}'.format(go_version), file=sys.stderr)
- return
- _run(['gfmrun', '-c', str(_gfmrun_count()), '-s', 'README.md'])
-
-
-def _vet():
- _run('go vet ./...')
-
-
-def _toc():
- _run('node_modules/.bin/markdown-toc -i README.md')
- _run('git diff --exit-code')
-
-
-def _gen():
- go_version = check_output('go version'.split()).split()[2]
- if go_version < 'go1.5':
- print('runtests: skip on {}'.format(go_version), file=sys.stderr)
- return
-
- _run('go generate ./...')
- _run('git diff --exit-code')
-
-
-def _run(command):
- if hasattr(command, 'split'):
- command = command.split()
- print('runtests: {}'.format(' '.join(command)), file=sys.stderr)
- check_call(command)
-
-
-def _gfmrun_count():
- with open('README.md') as infile:
- lines = infile.read().splitlines()
- return len(filter(_is_go_runnable, lines))
-
-
-def _is_go_runnable(line):
- return line.startswith('package main')
-
-
-def _combine_coverprofiles(coverprofiles):
- combined = tempfile.NamedTemporaryFile(
- suffix='.coverprofile', delete=False
- )
- combined.write('mode: set\n')
-
- for coverprofile in coverprofiles:
- with open(coverprofile, 'r') as infile:
- for line in infile.readlines():
- if not line.startswith('mode: '):
- combined.write(line)
-
- combined.flush()
- name = combined.name
- combined.close()
- return name
-
-
-if __name__ == '__main__':
- sys.exit(main())
diff --git a/vendor/src/github.com/dghubble/go-twitter/twitter/accounts.go b/vendor/src/github.com/dghubble/go-twitter/twitter/accounts.go
deleted file mode 100644
index b5e6ef7..0000000
--- a/vendor/src/github.com/dghubble/go-twitter/twitter/accounts.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package twitter
-
-import (
- "net/http"
-
- "github.com/dghubble/sling"
-)
-
-// AccountService provides a method for account credential verification.
-type AccountService struct {
- sling *sling.Sling
-}
-
-// newAccountService returns a new AccountService.
-func newAccountService(sling *sling.Sling) *AccountService {
- return &AccountService{
- sling: sling.Path("account/"),
- }
-}
-
-// AccountVerifyParams are the params for AccountService.VerifyCredentials.
-type AccountVerifyParams struct {
- IncludeEntities *bool `url:"include_entities,omitempty"`
- SkipStatus *bool `url:"skip_status,omitempty"`
- IncludeEmail *bool `url:"include_email,omitempty"`
-}
-
-// VerifyCredentials returns the authorized user if credentials are valid and
-// returns an error otherwise.
-// Requires a user auth context.
-// https://dev.twitter.com/rest/reference/get/account/verify_credentials
-func (s *AccountService) VerifyCredentials(params *AccountVerifyParams) (*User, *http.Response, error) {
- user := new(User)
- apiError := new(APIError)
- resp, err := s.sling.New().Get("verify_credentials.json").QueryStruct(params).Receive(user, apiError)
- return user, resp, relevantError(err, *apiError)
-}
diff --git a/vendor/src/github.com/dghubble/go-twitter/twitter/accounts_test.go b/vendor/src/github.com/dghubble/go-twitter/twitter/accounts_test.go
deleted file mode 100644
index a98680f..0000000
--- a/vendor/src/github.com/dghubble/go-twitter/twitter/accounts_test.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package twitter
-
-import (
- "fmt"
- "net/http"
- "testing"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestAccountService_VerifyCredentials(t *testing.T) {
- httpClient, mux, server := testServer()
- defer server.Close()
-
- mux.HandleFunc("/1.1/account/verify_credentials.json", func(w http.ResponseWriter, r *http.Request) {
- assertMethod(t, "GET", r)
- assertQuery(t, map[string]string{"include_entities": "false", "include_email": "true"}, r)
- w.Header().Set("Content-Type", "application/json")
- fmt.Fprintf(w, `{"name": "Dalton Hubble", "id": 623265148}`)
- })
-
- client := NewClient(httpClient)
- user, _, err := client.Accounts.VerifyCredentials(&AccountVerifyParams{IncludeEntities: Bool(false), IncludeEmail: Bool(true)})
- expected := &User{Name: "Dalton Hubble", ID: 623265148}
- assert.Nil(t, err)
- assert.Equal(t, expected, user)
-}
diff --git a/vendor/src/github.com/dghubble/go-twitter/twitter/backoffs.go b/vendor/src/github.com/dghubble/go-twitter/twitter/backoffs.go
deleted file mode 100644
index d3a5321..0000000
--- a/vendor/src/github.com/dghubble/go-twitter/twitter/backoffs.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package twitter
-
-import (
- "time"
-
- "github.com/cenkalti/backoff"
-)
-
-func newExponentialBackOff() *backoff.ExponentialBackOff {
- b := backoff.NewExponentialBackOff()
- b.InitialInterval = 5 * time.Second
- b.Multiplier = 2.0
- b.MaxInterval = 320 * time.Second
- b.Reset()
- return b
-}
-
-func newAggressiveExponentialBackOff() *backoff.ExponentialBackOff {
- b := backoff.NewExponentialBackOff()
- b.InitialInterval = 1 * time.Minute
- b.Multiplier = 2.0
- b.MaxInterval = 16 * time.Minute
- b.Reset()
- return b
-}
diff --git a/vendor/src/github.com/dghubble/go-twitter/twitter/backoffs_test.go b/vendor/src/github.com/dghubble/go-twitter/twitter/backoffs_test.go
deleted file mode 100644
index 49bfd03..0000000
--- a/vendor/src/github.com/dghubble/go-twitter/twitter/backoffs_test.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package twitter
-
-import (
- "testing"
- "time"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestNewExponentialBackOff(t *testing.T) {
- b := newExponentialBackOff()
- assert.Equal(t, 5*time.Second, b.InitialInterval)
- assert.Equal(t, 2.0, b.Multiplier)
- assert.Equal(t, 320*time.Second, b.MaxInterval)
-}
-
-func TestNewAggressiveExponentialBackOff(t *testing.T) {
- b := newAggressiveExponentialBackOff()
- assert.Equal(t, 1*time.Minute, b.InitialInterval)
- assert.Equal(t, 2.0, b.Multiplier)
- assert.Equal(t, 16*time.Minute, b.MaxInterval)
-}
-
-// BackoffRecorder is an implementation of backoff.BackOff that records
-// calls to NextBackOff and Reset for later inspection in tests.
-type BackOffRecorder struct {
- Count int
-}
-
-func (b *BackOffRecorder) NextBackOff() time.Duration {
- b.Count++
- return 1 * time.Nanosecond
-}
-
-func (b *BackOffRecorder) Reset() {
- b.Count = 0
-}
diff --git a/vendor/src/github.com/dghubble/go-twitter/twitter/demux.go b/vendor/src/github.com/dghubble/go-twitter/twitter/demux.go
deleted file mode 100644
index 29e21f2..0000000
--- a/vendor/src/github.com/dghubble/go-twitter/twitter/demux.go
+++ /dev/null
@@ -1,88 +0,0 @@
-package twitter
-
-// A Demux receives interface{} messages individually or from a channel and
-// sends those messages to one or more outputs determined by the
-// implementation.
-type Demux interface {
- Handle(message interface{})
- HandleChan(messages <-chan interface{})
-}
-
-// SwitchDemux receives messages and uses a type switch to send each typed
-// message to a handler function.
-type SwitchDemux struct {
- All func(message interface{})
- Tweet func(tweet *Tweet)
- DM func(dm *DirectMessage)
- StatusDeletion func(deletion *StatusDeletion)
- LocationDeletion func(LocationDeletion *LocationDeletion)
- StreamLimit func(limit *StreamLimit)
- StatusWithheld func(statusWithheld *StatusWithheld)
- UserWithheld func(userWithheld *UserWithheld)
- StreamDisconnect func(disconnect *StreamDisconnect)
- Warning func(warning *StallWarning)
- FriendsList func(friendsList *FriendsList)
- Event func(event *Event)
- Other func(message interface{})
-}
-
-// NewSwitchDemux returns a new SwitchMux which has NoOp handler functions.
-func NewSwitchDemux() SwitchDemux {
- return SwitchDemux{
- All: func(message interface{}) {},
- Tweet: func(tweet *Tweet) {},
- DM: func(dm *DirectMessage) {},
- StatusDeletion: func(deletion *StatusDeletion) {},
- LocationDeletion: func(LocationDeletion *LocationDeletion) {},
- StreamLimit: func(limit *StreamLimit) {},
- StatusWithheld: func(statusWithheld *StatusWithheld) {},
- UserWithheld: func(userWithheld *UserWithheld) {},
- StreamDisconnect: func(disconnect *StreamDisconnect) {},
- Warning: func(warning *StallWarning) {},
- FriendsList: func(friendsList *FriendsList) {},
- Event: func(event *Event) {},
- Other: func(message interface{}) {},
- }
-}
-
-// Handle determines the type of a message and calls the corresponding receiver
-// function with the typed message. All messages are passed to the All func.
-// Messages with unmatched types are passed to the Other func.
-func (d SwitchDemux) Handle(message interface{}) {
- d.All(message)
- switch msg := message.(type) {
- case *Tweet:
- d.Tweet(msg)
- case *DirectMessage:
- d.DM(msg)
- case *StatusDeletion:
- d.StatusDeletion(msg)
- case *LocationDeletion:
- d.LocationDeletion(msg)
- case *StreamLimit:
- d.StreamLimit(msg)
- case *StatusWithheld:
- d.StatusWithheld(msg)
- case *UserWithheld:
- d.UserWithheld(msg)
- case *StreamDisconnect:
- d.StreamDisconnect(msg)
- case *StallWarning:
- d.Warning(msg)
- case *FriendsList:
- d.FriendsList(msg)
- case *Event:
- d.Event(msg)
- default:
- d.Other(msg)
- }
-}
-
-// HandleChan receives messages and calls the corresponding receiver function
-// with the typed message. All messages are passed to the All func. Messages
-// with unmatched type are passed to the Other func.
-func (d SwitchDemux) HandleChan(messages <-chan interface{}) {
- for message := range messages {
- d.Handle(message)
- }
-}
diff --git a/vendor/src/github.com/dghubble/go-twitter/twitter/demux_test.go b/vendor/src/github.com/dghubble/go-twitter/twitter/demux_test.go
deleted file mode 100644
index ccdce52..0000000
--- a/vendor/src/github.com/dghubble/go-twitter/twitter/demux_test.go
+++ /dev/null
@@ -1,135 +0,0 @@
-package twitter
-
-import (
- "testing"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestDemux_Handle(t *testing.T) {
- messages, expectedCounts := exampleMessages()
- counts := &counter{}
- demux := newCounterDemux(counts)
- for _, message := range messages {
- demux.Handle(message)
- }
- assert.Equal(t, expectedCounts, counts)
-}
-
-func TestDemux_HandleChan(t *testing.T) {
- messages, expectedCounts := exampleMessages()
- counts := &counter{}
- demux := newCounterDemux(counts)
- ch := make(chan interface{})
- // stream messages into channel
- go func() {
- for _, msg := range messages {
- ch <- msg
- }
- close(ch)
- }()
- // handle channel messages until exhausted
- demux.HandleChan(ch)
- assert.Equal(t, expectedCounts, counts)
-}
-
-// counter counts stream messages by type for testing.
-type counter struct {
- all int
- tweet int
- dm int
- statusDeletion int
- locationDeletion int
- streamLimit int
- statusWithheld int
- userWithheld int
- streamDisconnect int
- stallWarning int
- friendsList int
- event int
- other int
-}
-
-// newCounterDemux returns a Demux which counts message types.
-func newCounterDemux(counter *counter) Demux {
- demux := NewSwitchDemux()
- demux.All = func(interface{}) {
- counter.all++
- }
- demux.Tweet = func(*Tweet) {
- counter.tweet++
- }
- demux.DM = func(*DirectMessage) {
- counter.dm++
- }
- demux.StatusDeletion = func(*StatusDeletion) {
- counter.statusDeletion++
- }
- demux.LocationDeletion = func(*LocationDeletion) {
- counter.locationDeletion++
- }
- demux.StreamLimit = func(*StreamLimit) {
- counter.streamLimit++
- }
- demux.StatusWithheld = func(*StatusWithheld) {
- counter.statusWithheld++
- }
- demux.UserWithheld = func(*UserWithheld) {
- counter.userWithheld++
- }
- demux.StreamDisconnect = func(*StreamDisconnect) {
- counter.streamDisconnect++
- }
- demux.Warning = func(*StallWarning) {
- counter.stallWarning++
- }
- demux.FriendsList = func(*FriendsList) {
- counter.friendsList++
- }
- demux.Event = func(*Event) {
- counter.event++
- }
- demux.Other = func(interface{}) {
- counter.other++
- }
- return demux
-}
-
-// examples messages returns a test stream of messages and the expected
-// counts of each message type.
-func exampleMessages() (messages []interface{}, expectedCounts *counter) {
- var (
- tweet = &Tweet{}
- dm = &DirectMessage{}
- statusDeletion = &StatusDeletion{}
- locationDeletion = &LocationDeletion{}
- streamLimit = &StreamLimit{}
- statusWithheld = &StatusWithheld{}
- userWithheld = &UserWithheld{}
- streamDisconnect = &StreamDisconnect{}
- stallWarning = &StallWarning{}
- friendsList = &FriendsList{}
- event = &Event{}
- otherA = func() {}
- otherB = struct{}{}
- )
- messages = []interface{}{tweet, dm, statusDeletion, locationDeletion,
- streamLimit, statusWithheld, userWithheld, streamDisconnect,
- stallWarning, friendsList, event, otherA, otherB}
- expectedCounts = &counter{
- all: len(messages),
- tweet: 1,
- dm: 1,
- statusDeletion: 1,
- locationDeletion: 1,
- streamLimit: 1,
- statusWithheld: 1,
- userWithheld: 1,
- streamDisconnect: 1,
- stallWarning: 1,
- friendsList: 1,
- event: 1,
- other: 2,
- }
- return messages, expectedCounts
-}
diff --git a/vendor/src/github.com/dghubble/go-twitter/twitter/direct_messages.go b/vendor/src/github.com/dghubble/go-twitter/twitter/direct_messages.go
deleted file mode 100644
index 3d91ec5..0000000
--- a/vendor/src/github.com/dghubble/go-twitter/twitter/direct_messages.go
+++ /dev/null
@@ -1,130 +0,0 @@
-package twitter
-
-import (
- "net/http"
-
- "github.com/dghubble/sling"
-)
-
-// DirectMessage is a direct message to a single recipient.
-type DirectMessage struct {
- CreatedAt string `json:"created_at"`
- Entities *Entities `json:"entities"`
- ID int64 `json:"id"`
- IDStr string `json:"id_str"`
- Recipient *User `json:"recipient"`
- RecipientID int64 `json:"recipient_id"`
- RecipientScreenName string `json:"recipient_screen_name"`
- Sender *User `json:"sender"`
- SenderID int64 `json:"sender_id"`
- SenderScreenName string `json:"sender_screen_name"`
- Text string `json:"text"`
-}
-
-// DirectMessageService provides methods for accessing Twitter direct message
-// API endpoints.
-type DirectMessageService struct {
- baseSling *sling.Sling
- sling *sling.Sling
-}
-
-// newDirectMessageService returns a new DirectMessageService.
-func newDirectMessageService(sling *sling.Sling) *DirectMessageService {
- return &DirectMessageService{
- baseSling: sling.New(),
- sling: sling.Path("direct_messages/"),
- }
-}
-
-// directMessageShowParams are the parameters for DirectMessageService.Show
-type directMessageShowParams struct {
- ID int64 `url:"id,omitempty"`
-}
-
-// Show returns the requested Direct Message.
-// Requires a user auth context with DM scope.
-// https://dev.twitter.com/rest/reference/get/direct_messages/show
-func (s *DirectMessageService) Show(id int64) (*DirectMessage, *http.Response, error) {
- params := &directMessageShowParams{ID: id}
- dm := new(DirectMessage)
- apiError := new(APIError)
- resp, err := s.sling.New().Get("show.json").QueryStruct(params).Receive(dm, apiError)
- return dm, resp, relevantError(err, *apiError)
-}
-
-// DirectMessageGetParams are the parameters for DirectMessageService.Get
-type DirectMessageGetParams struct {
- SinceID int64 `url:"since_id,omitempty"`
- MaxID int64 `url:"max_id,omitempty"`
- Count int `url:"count,omitempty"`
- IncludeEntities *bool `url:"include_entities,omitempty"`
- SkipStatus *bool `url:"skip_status,omitempty"`
-}
-
-// Get returns recent Direct Messages received by the authenticated user.
-// Requires a user auth context with DM scope.
-// https://dev.twitter.com/rest/reference/get/direct_messages
-func (s *DirectMessageService) Get(params *DirectMessageGetParams) ([]DirectMessage, *http.Response, error) {
- dms := new([]DirectMessage)
- apiError := new(APIError)
- resp, err := s.baseSling.New().Get("direct_messages.json").QueryStruct(params).Receive(dms, apiError)
- return *dms, resp, relevantError(err, *apiError)
-}
-
-// DirectMessageSentParams are the parameters for DirectMessageService.Sent
-type DirectMessageSentParams struct {
- SinceID int64 `url:"since_id,omitempty"`
- MaxID int64 `url:"max_id,omitempty"`
- Count int `url:"count,omitempty"`
- Page int `url:"page,omitempty"`
- IncludeEntities *bool `url:"include_entities,omitempty"`
-}
-
-// Sent returns recent Direct Messages sent by the authenticated user.
-// Requires a user auth context with DM scope.
-// https://dev.twitter.com/rest/reference/get/direct_messages/sent
-func (s *DirectMessageService) Sent(params *DirectMessageSentParams) ([]DirectMessage, *http.Response, error) {
- dms := new([]DirectMessage)
- apiError := new(APIError)
- resp, err := s.sling.New().Get("sent.json").QueryStruct(params).Receive(dms, apiError)
- return *dms, resp, relevantError(err, *apiError)
-}
-
-// DirectMessageNewParams are the parameters for DirectMessageService.New
-type DirectMessageNewParams struct {
- UserID int64 `url:"user_id,omitempty"`
- ScreenName string `url:"screen_name,omitempty"`
- Text string `url:"text"`
-}
-
-// New sends a new Direct Message to a specified user as the authenticated
-// user.
-// Requires a user auth context with DM scope.
-// https://dev.twitter.com/rest/reference/post/direct_messages/new
-func (s *DirectMessageService) New(params *DirectMessageNewParams) (*DirectMessage, *http.Response, error) {
- dm := new(DirectMessage)
- apiError := new(APIError)
- resp, err := s.sling.New().Post("new.json").BodyForm(params).Receive(dm, apiError)
- return dm, resp, relevantError(err, *apiError)
-}
-
-// DirectMessageDestroyParams are the parameters for DirectMessageService.Destroy
-type DirectMessageDestroyParams struct {
- ID int64 `url:"id,omitempty"`
- IncludeEntities *bool `url:"include_entities,omitempty"`
-}
-
-// Destroy deletes the Direct Message with the given id and returns it if
-// successful.
-// Requires a user auth context with DM scope.
-// https://dev.twitter.com/rest/reference/post/direct_messages/destroy
-func (s *DirectMessageService) Destroy(id int64, params *DirectMessageDestroyParams) (*DirectMessage, *http.Response, error) {
- if params == nil {
- params = &DirectMessageDestroyParams{}
- }
- params.ID = id
- dm := new(DirectMessage)
- apiError := new(APIError)
- resp, err := s.sling.New().Post("destroy.json").BodyForm(params).Receive(dm, apiError)
- return dm, resp, relevantError(err, *apiError)
-}
diff --git a/vendor/src/github.com/dghubble/go-twitter/twitter/direct_messages_test.go b/vendor/src/github.com/dghubble/go-twitter/twitter/direct_messages_test.go
deleted file mode 100644
index c0ea242..0000000
--- a/vendor/src/github.com/dghubble/go-twitter/twitter/direct_messages_test.go
+++ /dev/null
@@ -1,110 +0,0 @@
-package twitter
-
-import (
- "fmt"
- "net/http"
- "testing"
-
- "github.com/stretchr/testify/assert"
-)
-
-var (
- testDM = DirectMessage{
- ID: 240136858829479936,
- Recipient: &User{ScreenName: "theSeanCook"},
- Sender: &User{ScreenName: "s0c1alm3dia"},
- Text: "hello world",
- }
- testDMIDStr = "240136858829479936"
- testDMJSON = `{"id": 240136858829479936,"recipient": {"screen_name": "theSeanCook"},"sender": {"screen_name": "s0c1alm3dia"},"text": "hello world"}`
-)
-
-func TestDirectMessageService_Show(t *testing.T) {
- httpClient, mux, server := testServer()
- defer server.Close()
-
- mux.HandleFunc("/1.1/direct_messages/show.json", func(w http.ResponseWriter, r *http.Request) {
- assertMethod(t, "GET", r)
- assertQuery(t, map[string]string{"id": testDMIDStr}, r)
- w.Header().Set("Content-Type", "application/json")
- fmt.Fprintf(w, testDMJSON)
- })
-
- client := NewClient(httpClient)
- dms, _, err := client.DirectMessages.Show(testDM.ID)
- assert.Nil(t, err)
- assert.Equal(t, &testDM, dms)
-}
-
-func TestDirectMessageService_Get(t *testing.T) {
- httpClient, mux, server := testServer()
- defer server.Close()
-
- mux.HandleFunc("/1.1/direct_messages.json", func(w http.ResponseWriter, r *http.Request) {
- assertMethod(t, "GET", r)
- assertQuery(t, map[string]string{"since_id": "589147592367431680", "count": "1"}, r)
- w.Header().Set("Content-Type", "application/json")
- fmt.Fprintf(w, `[`+testDMJSON+`]`)
- })
-
- client := NewClient(httpClient)
- params := &DirectMessageGetParams{SinceID: 589147592367431680, Count: 1}
- dms, _, err := client.DirectMessages.Get(params)
- expected := []DirectMessage{testDM}
- assert.Nil(t, err)
- assert.Equal(t, expected, dms)
-}
-
-func TestDirectMessageService_Sent(t *testing.T) {
- httpClient, mux, server := testServer()
- defer server.Close()
-
- mux.HandleFunc("/1.1/direct_messages/sent.json", func(w http.ResponseWriter, r *http.Request) {
- assertMethod(t, "GET", r)
- assertQuery(t, map[string]string{"since_id": "589147592367431680", "count": "1"}, r)
- w.Header().Set("Content-Type", "application/json")
- fmt.Fprintf(w, `[`+testDMJSON+`]`)
- })
-
- client := NewClient(httpClient)
- params := &DirectMessageSentParams{SinceID: 589147592367431680, Count: 1}
- dms, _, err := client.DirectMessages.Sent(params)
- expected := []DirectMessage{testDM}
- assert.Nil(t, err)
- assert.Equal(t, expected, dms)
-}
-
-func TestDirectMessageService_New(t *testing.T) {
- httpClient, mux, server := testServer()
- defer server.Close()
-
- mux.HandleFunc("/1.1/direct_messages/new.json", func(w http.ResponseWriter, r *http.Request) {
- assertMethod(t, "POST", r)
- assertPostForm(t, map[string]string{"screen_name": "theseancook", "text": "hello world"}, r)
- w.Header().Set("Content-Type", "application/json")
- fmt.Fprintf(w, testDMJSON)
- })
-
- client := NewClient(httpClient)
- params := &DirectMessageNewParams{ScreenName: "theseancook", Text: "hello world"}
- dm, _, err := client.DirectMessages.New(params)
- assert.Nil(t, err)
- assert.Equal(t, &testDM, dm)
-}
-
-func TestDirectMessageService_Destroy(t *testing.T) {
- httpClient, mux, server := testServer()
- defer server.Close()
-
- mux.HandleFunc("/1.1/direct_messages/destroy.json", func(w http.ResponseWriter, r *http.Request) {
- assertMethod(t, "POST", r)
- assertPostForm(t, map[string]string{"id": testDMIDStr}, r)
- w.Header().Set("Content-Type", "application/json")
- fmt.Fprintf(w, testDMJSON)
- })
-
- client := NewClient(httpClient)
- dm, _, err := client.DirectMessages.Destroy(testDM.ID, nil)
- assert.Nil(t, err)
- assert.Equal(t, &testDM, dm)
-}
diff --git a/vendor/src/github.com/dghubble/go-twitter/twitter/doc.go b/vendor/src/github.com/dghubble/go-twitter/twitter/doc.go
deleted file mode 100644
index 47136c5..0000000
--- a/vendor/src/github.com/dghubble/go-twitter/twitter/doc.go
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
-Package twitter provides a Client for the Twitter API.
-
-
-The twitter package provides a Client for accessing the Twitter API. Here are
-some example requests.
-
- // Twitter client
- client := twitter.NewClient(httpClient)
- // Home Timeline
- tweets, resp, err := client.Timelines.HomeTimeline(&HomeTimelineParams{})
- // Send a Tweet
- tweet, resp, err := client.Statuses.Update("just setting up my twttr", nil)
- // Status Show
- tweet, resp, err := client.Statuses.Show(585613041028431872, nil)
- // User Show
- params := &twitter.UserShowParams{ScreenName: "dghubble"}
- user, resp, err := client.Users.Show(params)
- // Followers
- followers, resp, err := client.Followers.List(&FollowerListParams{})
-
-Required parameters are passed as positional arguments. Optional parameters
-are passed in a typed params struct (or pass nil).
-
-Authentication
-
-By design, the Twitter Client accepts any http.Client so user auth (OAuth1) or
-application auth (OAuth2) requests can be made by using the appropriate
-authenticated client. Use the https://github.com/dghubble/oauth1 and
-https://github.com/golang/oauth2 packages to obtain an http.Client which
-transparently authorizes requests.
-
-For example, make requests as a consumer application on behalf of a user who
-has granted access, with OAuth1.
-
- // OAuth1
- import (
- "github.com/dghubble/go-twitter/twitter"
- "github.com/dghubble/oauth1"
- )
-
- config := oauth1.NewConfig("consumerKey", "consumerSecret")
- token := oauth1.NewToken("accessToken", "accessSecret")
- // http.Client will automatically authorize Requests
- httpClient := config.Client(oauth1.NoContext, token)
-
- // twitter client
- client := twitter.NewClient(httpClient)
-
-If no user auth context is needed, make requests as your application with
-application auth.
-
- // OAuth2
- import (
- "github.com/dghubble/go-twitter/twitter"
- "golang.org/x/oauth2"
- )
-
- config := &oauth2.Config{}
- token := &oauth2.Token{AccessToken: accessToken}
- // http.Client will automatically authorize Requests
- httpClient := config.Client(oauth2.NoContext, token)
-
- // twitter client
- client := twitter.NewClient(httpClient)
-
-To implement Login with Twitter, see https://github.com/dghubble/gologin.
-
-*/
-package twitter
diff --git a/vendor/src/github.com/dghubble/go-twitter/twitter/entities.go b/vendor/src/github.com/dghubble/go-twitter/twitter/entities.go
deleted file mode 100644
index 3a159b9..0000000
--- a/vendor/src/github.com/dghubble/go-twitter/twitter/entities.go
+++ /dev/null
@@ -1,73 +0,0 @@
-package twitter
-
-// Entities represent metadata and context info parsed from Twitter components.
-// https://dev.twitter.com/overview/api/entities
-// TODO: symbols
-type Entities struct {
- Hashtags []HashtagEntity `json:"hashtags"`
- Media []MediaEntity `json:"media"`
- Urls []URLEntity `json:"urls"`
- UserMentions []MentionEntity `json:"user_mentions"`
-}
-
-// HashtagEntity represents a hashtag which has been parsed from text.
-type HashtagEntity struct {
- Indices Indices `json:"indices"`
- Text string `json:"text"`
-}
-
-// URLEntity represents a URL which has been parsed from text.
-type URLEntity struct {
- Indices Indices `json:"indices"`
- DisplayURL string `json:"display_url"`
- ExpandedURL string `json:"expanded_url"`
- URL string `json:"url"`
-}
-
-// MediaEntity represents media elements associated with a Tweet.
-// TODO: add Sizes
-type MediaEntity struct {
- URLEntity
- ID int64 `json:"id"`
- IDStr string `json:"id_str"`
- MediaURL string `json:"media_url"`
- MediaURLHttps string `json:"media_url_https"`
- SourceStatusID int64 `json:"source_status_id"`
- SourceStatusIDStr string `json:"source_status_id_str"`
- Type string `json:"type"`
-}
-
-// MentionEntity represents Twitter user mentions parsed from text.
-type MentionEntity struct {
- Indices Indices `json:"indices"`
- ID int64 `json:"id"`
- IDStr string `json:"id_str"`
- Name string `json:"name"`
- ScreenName string `json:"screen_name"`
-}
-
-// UserEntities contain Entities parsed from User url and description fields.
-// https://dev.twitter.com/overview/api/entities-in-twitter-objects#users
-type UserEntities struct {
- URL Entities `json:"url"`
- Description Entities `json:"description"`
-}
-
-// ExtendedEntity contains media information.
-// https://dev.twitter.com/overview/api/entities-in-twitter-objects#extended_entities
-type ExtendedEntity struct {
- Media []MediaEntity `json:"media"`
-}
-
-// Indices represent the start and end offsets within text.
-type Indices [2]int
-
-// Start returns the index at which an entity starts, inclusive.
-func (i Indices) Start() int {
- return i[0]
-}
-
-// End returns the index at which an entity ends, exclusive.
-func (i Indices) End() int {
- return i[1]
-}
diff --git a/vendor/src/github.com/dghubble/go-twitter/twitter/entities_test.go b/vendor/src/github.com/dghubble/go-twitter/twitter/entities_test.go
deleted file mode 100644
index fdfde94..0000000
--- a/vendor/src/github.com/dghubble/go-twitter/twitter/entities_test.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package twitter
-
-import (
- "testing"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestIndices(t *testing.T) {
- cases := []struct {
- pair Indices
- expectedStart int
- expectedEnd int
- }{
- {Indices{}, 0, 0},
- {Indices{25, 47}, 25, 47},
- }
- for _, c := range cases {
- assert.Equal(t, c.expectedStart, c.pair.Start())
- assert.Equal(t, c.expectedEnd, c.pair.End())
- }
-}
diff --git a/vendor/src/github.com/dghubble/go-twitter/twitter/errors.go b/vendor/src/github.com/dghubble/go-twitter/twitter/errors.go
deleted file mode 100644
index 9208a1a..0000000
--- a/vendor/src/github.com/dghubble/go-twitter/twitter/errors.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package twitter
-
-import (
- "fmt"
-)
-
-// APIError represents a Twitter API Error response
-// https://dev.twitter.com/overview/api/response-codes
-type APIError struct {
- Errors []ErrorDetail `json:"errors"`
-}
-
-// ErrorDetail represents an individual item in an APIError.
-type ErrorDetail struct {
- Message string `json:"message"`
- Code int `json:"code"`
-}
-
-func (e APIError) Error() string {
- if len(e.Errors) > 0 {
- err := e.Errors[0]
- return fmt.Sprintf("twitter: %d %v", err.Code, err.Message)
- }
- return ""
-}
-
-// Empty returns true if empty. Otherwise, at least 1 error message/code is
-// present and false is returned.
-func (e APIError) Empty() bool {
- if len(e.Errors) == 0 {
- return true
- }
- return false
-}
-
-// relevantError returns any non-nil http-related error (creating the request,
-// getting the response, decoding) if any. If the decoded apiError is non-zero
-// the apiError is returned. Otherwise, no errors occurred, returns nil.
-func relevantError(httpError error, apiError APIError) error {
- if httpError != nil {
- return httpError
- }
- if apiError.Empty() {
- return nil
- }
- return apiError
-}
diff --git a/vendor/src/github.com/dghubble/go-twitter/twitter/errors_test.go b/vendor/src/github.com/dghubble/go-twitter/twitter/errors_test.go
deleted file mode 100644
index 806db75..0000000
--- a/vendor/src/github.com/dghubble/go-twitter/twitter/errors_test.go
+++ /dev/null
@@ -1,48 +0,0 @@
-package twitter
-
-import (
- "fmt"
- "testing"
-
- "github.com/stretchr/testify/assert"
-)
-
-var errAPI = APIError{
- Errors: []ErrorDetail{
- ErrorDetail{Message: "Status is a duplicate", Code: 187},
- },
-}
-var errHTTP = fmt.Errorf("unknown host")
-
-func TestAPIError_Error(t *testing.T) {
- err := APIError{}
- if assert.Error(t, err) {
- assert.Equal(t, "", err.Error())
- }
- if assert.Error(t, errAPI) {
- assert.Equal(t, "twitter: 187 Status is a duplicate", errAPI.Error())
- }
-}
-
-func TestAPIError_Empty(t *testing.T) {
- err := APIError{}
- assert.True(t, err.Empty())
- assert.False(t, errAPI.Empty())
-}
-
-func TestRelevantError(t *testing.T) {
- cases := []struct {
- httpError error
- apiError APIError
- expected error
- }{
- {nil, APIError{}, nil},
- {nil, errAPI, errAPI},
- {errHTTP, APIError{}, errHTTP},
- {errHTTP, errAPI, errHTTP},
- }
- for _, c := range cases {
- err := relevantError(c.httpError, c.apiError)
- assert.Equal(t, c.expected, err)
- }
-}
diff --git a/vendor/src/github.com/dghubble/go-twitter/twitter/followers.go b/vendor/src/github.com/dghubble/go-twitter/twitter/followers.go
deleted file mode 100644
index a1fff18..0000000
--- a/vendor/src/github.com/dghubble/go-twitter/twitter/followers.go
+++ /dev/null
@@ -1,73 +0,0 @@
-package twitter
-
-import (
- "net/http"
-
- "github.com/dghubble/sling"
-)
-
-// FollowerIDs is a cursored collection of follower ids.
-type FollowerIDs struct {
- IDs []int64 `json:"ids"`
- NextCursor int64 `json:"next_cursor"`
- NextCursorStr string `json:"next_cursor_str"`
- PreviousCursor int64 `json:"previous_cursor"`
- PreviousCursorStr string `json:"previous_cursor_str"`
-}
-
-// Followers is a cursored collection of followers.
-type Followers struct {
- Users []User `json:"users"`
- NextCursor int64 `json:"next_cursor"`
- NextCursorStr string `json:"next_cursor_str"`
- PreviousCursor int64 `json:"previous_cursor"`
- PreviousCursorStr string `json:"previous_cursor_str"`
-}
-
-// FollowerService provides methods for accessing Twitter followers endpoints.
-type FollowerService struct {
- sling *sling.Sling
-}
-
-// newFollowerService returns a new FollowerService.
-func newFollowerService(sling *sling.Sling) *FollowerService {
- return &FollowerService{
- sling: sling.Path("followers/"),
- }
-}
-
-// FollowerIDParams are the parameters for FollowerService.Ids
-type FollowerIDParams struct {
- UserID int64 `url:"user_id,omitempty"`
- ScreenName string `url:"screen_name,omitempty"`
- Cursor int64 `url:"cursor,omitempty"`
- Count int `url:"count,omitempty"`
-}
-
-// IDs returns a cursored collection of user ids following the specified user.
-// https://dev.twitter.com/rest/reference/get/followers/ids
-func (s *FollowerService) IDs(params *FollowerIDParams) (*FollowerIDs, *http.Response, error) {
- ids := new(FollowerIDs)
- apiError := new(APIError)
- resp, err := s.sling.New().Get("ids.json").QueryStruct(params).Receive(ids, apiError)
- return ids, resp, relevantError(err, *apiError)
-}
-
-// FollowerListParams are the parameters for FollowerService.List
-type FollowerListParams struct {
- UserID int64 `url:"user_id,omitempty"`
- ScreenName string `url:"screen_name,omitempty"`
- Cursor int `url:"cursor,omitempty"`
- Count int `url:"count,omitempty"`
- SkipStatus *bool `url:"skip_status,omitempty"`
- IncludeUserEntities *bool `url:"include_user_entities,omitempty"`
-}
-
-// List returns a cursored collection of Users following the specified user.
-// https://dev.twitter.com/rest/reference/get/followers/list
-func (s *FollowerService) List(params *FollowerListParams) (*Followers, *http.Response, error) {
- followers := new(Followers)
- apiError := new(APIError)
- resp, err := s.sling.New().Get("list.json").QueryStruct(params).Receive(followers, apiError)
- return followers, resp, relevantError(err, *apiError)
-}
diff --git a/vendor/src/github.com/dghubble/go-twitter/twitter/followers_test.go b/vendor/src/github.com/dghubble/go-twitter/twitter/followers_test.go
deleted file mode 100644
index cab8472..0000000
--- a/vendor/src/github.com/dghubble/go-twitter/twitter/followers_test.go
+++ /dev/null
@@ -1,69 +0,0 @@
-package twitter
-
-import (
- "fmt"
- "net/http"
- "testing"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestFollowerService_Ids(t *testing.T) {
- httpClient, mux, server := testServer()
- defer server.Close()
-
- mux.HandleFunc("/1.1/followers/ids.json", func(w http.ResponseWriter, r *http.Request) {
- assertMethod(t, "GET", r)
- assertQuery(t, map[string]string{"user_id": "623265148", "count": "5", "cursor": "1516933260114270762"}, r)
- w.Header().Set("Content-Type", "application/json")
- fmt.Fprintf(w, `{"ids":[178082406,3318241001,1318020818,191714329,376703838],"next_cursor":1516837838944119498,"next_cursor_str":"1516837838944119498","previous_cursor":-1516924983503961435,"previous_cursor_str":"-1516924983503961435"}`)
- })
- expected := &FollowerIDs{
- IDs: []int64{178082406, 3318241001, 1318020818, 191714329, 376703838},
- NextCursor: 1516837838944119498,
- NextCursorStr: "1516837838944119498",
- PreviousCursor: -1516924983503961435,
- PreviousCursorStr: "-1516924983503961435",
- }
-
- client := NewClient(httpClient)
- params := &FollowerIDParams{
- UserID: 623265148,
- Count: 5,
- Cursor: 1516933260114270762,
- }
- followerIDs, _, err := client.Followers.IDs(params)
- assert.Nil(t, err)
- assert.Equal(t, expected, followerIDs)
-}
-
-func TestFollowerService_List(t *testing.T) {
- httpClient, mux, server := testServer()
- defer server.Close()
-
- mux.HandleFunc("/1.1/followers/list.json", func(w http.ResponseWriter, r *http.Request) {
- assertMethod(t, "GET", r)
- assertQuery(t, map[string]string{"screen_name": "dghubble", "count": "5", "cursor": "1516933260114270762", "skip_status": "true", "include_user_entities": "false"}, r)
- w.Header().Set("Content-Type", "application/json")
- fmt.Fprintf(w, `{"users": [{"id": 123}], "next_cursor":1516837838944119498,"next_cursor_str":"1516837838944119498","previous_cursor":-1516924983503961435,"previous_cursor_str":"-1516924983503961435"}`)
- })
- expected := &Followers{
- Users: []User{User{ID: 123}},
- NextCursor: 1516837838944119498,
- NextCursorStr: "1516837838944119498",
- PreviousCursor: -1516924983503961435,
- PreviousCursorStr: "-1516924983503961435",
- }
-
- client := NewClient(httpClient)
- params := &FollowerListParams{
- ScreenName: "dghubble",
- Count: 5,
- Cursor: 1516933260114270762,
- SkipStatus: Bool(true),
- IncludeUserEntities: Bool(false),
- }
- followers, _, err := client.Followers.List(params)
- assert.Nil(t, err)
- assert.Equal(t, expected, followers)
-}
diff --git a/vendor/src/github.com/dghubble/go-twitter/twitter/statuses.go b/vendor/src/github.com/dghubble/go-twitter/twitter/statuses.go
deleted file mode 100644
index 4219d17..0000000
--- a/vendor/src/github.com/dghubble/go-twitter/twitter/statuses.go
+++ /dev/null
@@ -1,251 +0,0 @@
-package twitter
-
-import (
- "fmt"
- "net/http"
-
- "github.com/dghubble/sling"
-)
-
-// Tweet represents a Twitter Tweet, previously called a status.
-// https://dev.twitter.com/overview/api/tweets
-// Unused or deprecated fields not provided: Geo, Annotations
-type Tweet struct {
- Contributors []Contributor `json:"contributors"`
- Coordinates *Coordinates `json:"coordinates"`
- CreatedAt string `json:"created_at"`
- CurrentUserRetweet *TweetIdentifier `json:"current_user_retweet"`
- Entities *Entities `json:"entities"`
- FavoriteCount int `json:"favorite_count"`
- Favorited bool `json:"favorited"`
- FilterLevel string `json:"filter_level"`
- ID int64 `json:"id"`
- IDStr string `json:"id_str"`
- InReplyToScreenName string `json:"in_reply_to_screen_name"`
- InReplyToStatusID int64 `json:"in_reply_to_status_id"`
- InReplyToStatusIDStr string `json:"in_reply_to_status_id_str"`
- InReplyToUserID int64 `json:"in_reply_to_user_id"`
- InReplyToUserIDStr string `json:"in_reply_to_user_id_str"`
- Lang string `json:"lang"`
- PossiblySensitive bool `json:"possibly_sensitive"`
- RetweetCount int `json:"retweet_count"`
- Retweeted bool `json:"retweeted"`
- RetweetedStatus *Tweet `json:"retweeted_status"`
- Source string `json:"source"`
- Scopes map[string]interface{} `json:"scopes"`
- Text string `json:"text"`
- Place *Place `json:"place"`
- Truncated bool `json:"truncated"`
- User *User `json:"user"`
- WithheldCopyright bool `json:"withheld_copyright"`
- WithheldInCountries []string `json:"withheld_in_countries"`
- WithheldScope string `json:"withheld_scope"`
- ExtendedEntities *ExtendedEntity `json:"extended_entities"`
- QuotedStatusID int64 `json:"quoted_status_id"`
- QuotedStatusIDStr string `json:"quoted_status_id_str"`
- QuotedStatus *Tweet `json:"quoted_status"`
-}
-
-// Place represents a Twitter Place / Location
-// https://dev.twitter.com/overview/api/places
-type Place struct {
- Attributes map[string]string `json:"attributes"`
- BoundingBox *BoundingBox `json:"bounding_box"`
- Country string `json:"country"`
- CountryCode string `json:"country_code"`
- FullName string `json:"full_name"`
- Geometry *BoundingBox `json:"geometry"`
- ID string `json:"id"`
- Name string `json:"name"`
- PlaceType string `json:"place_type"`
- Polylines []string `json:"polylines"`
- URL string `json:"url"`
-}
-
-// BoundingBox represents the bounding coordinates (longitude, latitutde)
-// defining the bounds of a box containing a Place entity.
-type BoundingBox struct {
- Coordinates [][][2]float64 `json:"coordinates"`
- Type string `json:"type"`
-}
-
-// Contributor represents a brief summary of a User identifiers.
-type Contributor struct {
- ID int64 `json:"id"`
- IDStr string `json:"id_str"`
- ScreenName string `json:"screen_name"`
-}
-
-// Coordinates are pairs of longitude and latitude locations.
-type Coordinates struct {
- Coordinates [2]float64 `json:"coordinates"`
- Type string `json:"type"`
-}
-
-// TweetIdentifier represents the id by which a Tweet can be identified.
-type TweetIdentifier struct {
- ID int64 `json:"id"`
- IDStr string `json:"id_str"`
-}
-
-// StatusService provides methods for accessing Twitter status API endpoints.
-type StatusService struct {
- sling *sling.Sling
-}
-
-// newStatusService returns a new StatusService.
-func newStatusService(sling *sling.Sling) *StatusService {
- return &StatusService{
- sling: sling.Path("statuses/"),
- }
-}
-
-// StatusShowParams are the parameters for StatusService.Show
-type StatusShowParams struct {
- ID int64 `url:"id,omitempty"`
- TrimUser *bool `url:"trim_user,omitempty"`
- IncludeMyRetweet *bool `url:"include_my_retweet,omitempty"`
- IncludeEntities *bool `url:"include_entities,omitempty"`
-}
-
-// Show returns the requested Tweet.
-// https://dev.twitter.com/rest/reference/get/statuses/show/%3Aid
-func (s *StatusService) Show(id int64, params *StatusShowParams) (*Tweet, *http.Response, error) {
- if params == nil {
- params = &StatusShowParams{}
- }
- params.ID = id
- tweet := new(Tweet)
- apiError := new(APIError)
- resp, err := s.sling.New().Get("show.json").QueryStruct(params).Receive(tweet, apiError)
- return tweet, resp, relevantError(err, *apiError)
-}
-
-// StatusLookupParams are the parameters for StatusService.Lookup
-type StatusLookupParams struct {
- ID []int64 `url:"id,omitempty,comma"`
- TrimUser *bool `url:"trim_user,omitempty"`
- IncludeEntities *bool `url:"include_entities,omitempty"`
- Map *bool `url:"map,omitempty"`
-}
-
-// Lookup returns the requested Tweets as a slice. Combines ids from the
-// required ids argument and from params.Id.
-// https://dev.twitter.com/rest/reference/get/statuses/lookup
-func (s *StatusService) Lookup(ids []int64, params *StatusLookupParams) ([]Tweet, *http.Response, error) {
- if params == nil {
- params = &StatusLookupParams{}
- }
- params.ID = append(params.ID, ids...)
- tweets := new([]Tweet)
- apiError := new(APIError)
- resp, err := s.sling.New().Get("lookup.json").QueryStruct(params).Receive(tweets, apiError)
- return *tweets, resp, relevantError(err, *apiError)
-}
-
-// StatusUpdateParams are the parameters for StatusService.Update
-type StatusUpdateParams struct {
- Status string `url:"status,omitempty"`
- InReplyToStatusID int64 `url:"in_reply_to_status_id,omitempty"`
- PossiblySensitive *bool `url:"possibly_sensitive,omitempty"`
- Lat *float64 `url:"lat,omitempty"`
- Long *float64 `url:"long,omitempty"`
- PlaceID string `url:"place_id,omitempty"`
- DisplayCoordinates *bool `url:"display_coordinates,omitempty"`
- TrimUser *bool `url:"trim_user,omitempty"`
- MediaIds []int64 `url:"media_ids,omitempty,comma"`
-}
-
-// Update updates the user's status, also known as Tweeting.
-// Requires a user auth context.
-// https://dev.twitter.com/rest/reference/post/statuses/update
-func (s *StatusService) Update(status string, params *StatusUpdateParams) (*Tweet, *http.Response, error) {
- if params == nil {
- params = &StatusUpdateParams{}
- }
- params.Status = status
- tweet := new(Tweet)
- apiError := new(APIError)
- resp, err := s.sling.New().Post("update.json").BodyForm(params).Receive(tweet, apiError)
- return tweet, resp, relevantError(err, *apiError)
-}
-
-// StatusRetweetParams are the parameters for StatusService.Retweet
-type StatusRetweetParams struct {
- ID int64 `url:"id,omitempty"`
- TrimUser *bool `url:"trim_user,omitempty"`
-}
-
-// Retweet retweets the Tweet with the given id and returns the original Tweet
-// with embedded retweet details.
-// Requires a user auth context.
-// https://dev.twitter.com/rest/reference/post/statuses/retweet/%3Aid
-func (s *StatusService) Retweet(id int64, params *StatusRetweetParams) (*Tweet, *http.Response, error) {
- if params == nil {
- params = &StatusRetweetParams{}
- }
- params.ID = id
- tweet := new(Tweet)
- apiError := new(APIError)
- path := fmt.Sprintf("retweet/%d.json", params.ID)
- resp, err := s.sling.New().Post(path).BodyForm(params).Receive(tweet, apiError)
- return tweet, resp, relevantError(err, *apiError)
-}
-
-// StatusDestroyParams are the parameters for StatusService.Destroy
-type StatusDestroyParams struct {
- ID int64 `url:"id,omitempty"`
- TrimUser *bool `url:"trim_user,omitempty"`
-}
-
-// Destroy deletes the Tweet with the given id and returns it if successful.
-// Requires a user auth context.
-// https://dev.twitter.com/rest/reference/post/statuses/destroy/%3Aid
-func (s *StatusService) Destroy(id int64, params *StatusDestroyParams) (*Tweet, *http.Response, error) {
- if params == nil {
- params = &StatusDestroyParams{}
- }
- params.ID = id
- tweet := new(Tweet)
- apiError := new(APIError)
- path := fmt.Sprintf("destroy/%d.json", params.ID)
- resp, err := s.sling.New().Post(path).BodyForm(params).Receive(tweet, apiError)
- return tweet, resp, relevantError(err, *apiError)
-}
-
-// OEmbedTweet represents a Tweet in oEmbed format.
-type OEmbedTweet struct {
- URL string `json:"url"`
- ProviderURL string `json:"provider_url"`
- ProviderName string `json:"provider_name"`
- AuthorName string `json:"author_name"`
- Version string `json:"version"`
- AuthorURL string `json:"author_url"`
- Type string `json:"type"`
- HTML string `json:"html"`
- Height int64 `json:"height"`
- Width int64 `json:"width"`
- CacheAge string `json:"cache_age"`
-}
-
-// StatusOEmbedParams are the parameters for StatusService.OEmbed
-type StatusOEmbedParams struct {
- ID int64 `url:"id,omitempty"`
- URL string `url:"url,omitempty"`
- Align string `url:"align,omitempty"`
- MaxWidth int64 `url:"maxwidth,omitempty"`
- HideMedia *bool `url:"hide_media,omitempty"`
- HideThread *bool `url:"hide_media,omitempty"`
- OmitScript *bool `url:"hide_media,omitempty"`
- WidgetType string `url:"widget_type,omitempty"`
- HideTweet *bool `url:"hide_tweet,omitempty"`
-}
-
-// OEmbed returns the requested Tweet in oEmbed format.
-// https://dev.twitter.com/rest/reference/get/statuses/oembed
-func (s *StatusService) OEmbed(params *StatusOEmbedParams) (*OEmbedTweet, *http.Response, error) {
- oEmbedTweet := new(OEmbedTweet)
- apiError := new(APIError)
- resp, err := s.sling.New().Get("oembed.json").QueryStruct(params).Receive(oEmbedTweet, apiError)
- return oEmbedTweet, resp, relevantError(err, *apiError)
-}
diff --git a/vendor/src/github.com/dghubble/go-twitter/twitter/statuses_test.go b/vendor/src/github.com/dghubble/go-twitter/twitter/statuses_test.go
deleted file mode 100644
index 8743776..0000000
--- a/vendor/src/github.com/dghubble/go-twitter/twitter/statuses_test.go
+++ /dev/null
@@ -1,224 +0,0 @@
-package twitter
-
-import (
- "fmt"
- "net/http"
- "strings"
- "testing"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestStatusService_Show(t *testing.T) {
- httpClient, mux, server := testServer()
- defer server.Close()
-
- mux.HandleFunc("/1.1/statuses/show.json", func(w http.ResponseWriter, r *http.Request) {
- assertMethod(t, "GET", r)
- assertQuery(t, map[string]string{"id": "589488862814076930", "include_entities": "false"}, r)
- w.Header().Set("Content-Type", "application/json")
- fmt.Fprintf(w, `{"user": {"screen_name": "dghubble"}, "text": ".@audreyr use a DONTREADME file if you really want people to read it :P"}`)
- })
-
- client := NewClient(httpClient)
- params := &StatusShowParams{ID: 5441, IncludeEntities: Bool(false)}
- tweet, _, err := client.Statuses.Show(589488862814076930, params)
- expected := &Tweet{User: &User{ScreenName: "dghubble"}, Text: ".@audreyr use a DONTREADME file if you really want people to read it :P"}
- assert.Nil(t, err)
- assert.Equal(t, expected, tweet)
-}
-
-func TestStatusService_ShowHandlesNilParams(t *testing.T) {
- httpClient, mux, server := testServer()
- defer server.Close()
-
- mux.HandleFunc("/1.1/statuses/show.json", func(w http.ResponseWriter, r *http.Request) {
- assertQuery(t, map[string]string{"id": "589488862814076930"}, r)
- })
- client := NewClient(httpClient)
- client.Statuses.Show(589488862814076930, nil)
-}
-
-func TestStatusService_Lookup(t *testing.T) {
- httpClient, mux, server := testServer()
- defer server.Close()
-
- mux.HandleFunc("/1.1/statuses/lookup.json", func(w http.ResponseWriter, r *http.Request) {
- assertMethod(t, "GET", r)
- assertQuery(t, map[string]string{"id": "20,573893817000140800", "trim_user": "true"}, r)
- w.Header().Set("Content-Type", "application/json")
- fmt.Fprintf(w, `[{"id": 20, "text": "just setting up my twttr"}, {"id": 573893817000140800, "text": "Don't get lost #PaxEast2015"}]`)
- })
-
- client := NewClient(httpClient)
- params := &StatusLookupParams{ID: []int64{20}, TrimUser: Bool(true)}
- tweets, _, err := client.Statuses.Lookup([]int64{573893817000140800}, params)
- expected := []Tweet{Tweet{ID: 20, Text: "just setting up my twttr"}, Tweet{ID: 573893817000140800, Text: "Don't get lost #PaxEast2015"}}
- assert.Nil(t, err)
- assert.Equal(t, expected, tweets)
-}
-
-func TestStatusService_LookupHandlesNilParams(t *testing.T) {
- httpClient, mux, server := testServer()
- defer server.Close()
- mux.HandleFunc("/1.1/statuses/lookup.json", func(w http.ResponseWriter, r *http.Request) {
- assertQuery(t, map[string]string{"id": "20,573893817000140800"}, r)
- })
- client := NewClient(httpClient)
- client.Statuses.Lookup([]int64{20, 573893817000140800}, nil)
-}
-
-func TestStatusService_Update(t *testing.T) {
- httpClient, mux, server := testServer()
- defer server.Close()
-
- mux.HandleFunc("/1.1/statuses/update.json", func(w http.ResponseWriter, r *http.Request) {
- assertMethod(t, "POST", r)
- assertQuery(t, map[string]string{}, r)
- assertPostForm(t, map[string]string{"status": "very informative tweet", "media_ids": "123456789,987654321", "lat": "37.826706", "long": "-122.42219"}, r)
- w.Header().Set("Content-Type", "application/json")
- fmt.Fprintf(w, `{"id": 581980947630845953, "text": "very informative tweet"}`)
- })
-
- client := NewClient(httpClient)
- params := &StatusUpdateParams{MediaIds: []int64{123456789, 987654321}, Lat: Float(37.826706), Long: Float(-122.422190)}
- tweet, _, err := client.Statuses.Update("very informative tweet", params)
- expected := &Tweet{ID: 581980947630845953, Text: "very informative tweet"}
- assert.Nil(t, err)
- assert.Equal(t, expected, tweet)
-}
-
-func TestStatusService_UpdateHandlesNilParams(t *testing.T) {
- httpClient, mux, server := testServer()
- defer server.Close()
- mux.HandleFunc("/1.1/statuses/update.json", func(w http.ResponseWriter, r *http.Request) {
- assertPostForm(t, map[string]string{"status": "very informative tweet"}, r)
- })
- client := NewClient(httpClient)
- client.Statuses.Update("very informative tweet", nil)
-}
-
-func TestStatusService_APIError(t *testing.T) {
- httpClient, mux, server := testServer()
- defer server.Close()
- mux.HandleFunc("/1.1/statuses/update.json", func(w http.ResponseWriter, r *http.Request) {
- assertPostForm(t, map[string]string{"status": "very informative tweet"}, r)
- w.Header().Set("Content-Type", "application/json")
- w.WriteHeader(403)
- fmt.Fprintf(w, `{"errors": [{"message": "Status is a duplicate", "code": 187}]}`)
- })
-
- client := NewClient(httpClient)
- _, _, err := client.Statuses.Update("very informative tweet", nil)
- expected := APIError{
- Errors: []ErrorDetail{
- ErrorDetail{Message: "Status is a duplicate", Code: 187},
- },
- }
- if assert.Error(t, err) {
- assert.Equal(t, expected, err)
- }
-}
-
-func TestStatusService_HTTPError(t *testing.T) {
- httpClient, _, server := testServer()
- server.Close()
- client := NewClient(httpClient)
- _, _, err := client.Statuses.Update("very informative tweet", nil)
- if err == nil || !strings.Contains(err.Error(), "connection refused") {
- t.Errorf("Statuses.Update error expected connection refused, got: \n %+v", err)
- }
-}
-
-func TestStatusService_Retweet(t *testing.T) {
- httpClient, mux, server := testServer()
- defer server.Close()
-
- mux.HandleFunc("/1.1/statuses/retweet/20.json", func(w http.ResponseWriter, r *http.Request) {
- assertMethod(t, "POST", r)
- assertQuery(t, map[string]string{}, r)
- assertPostForm(t, map[string]string{"id": "20", "trim_user": "true"}, r)
- w.Header().Set("Content-Type", "application/json")
- fmt.Fprintf(w, `{"id": 581980947630202020, "text": "RT @jack: just setting up my twttr", "retweeted_status": {"id": 20, "text": "just setting up my twttr"}}`)
- })
-
- client := NewClient(httpClient)
- params := &StatusRetweetParams{TrimUser: Bool(true)}
- tweet, _, err := client.Statuses.Retweet(20, params)
- expected := &Tweet{ID: 581980947630202020, Text: "RT @jack: just setting up my twttr", RetweetedStatus: &Tweet{ID: 20, Text: "just setting up my twttr"}}
- assert.Nil(t, err)
- assert.Equal(t, expected, tweet)
-}
-
-func TestStatusService_RetweetHandlesNilParams(t *testing.T) {
- httpClient, mux, server := testServer()
- defer server.Close()
-
- mux.HandleFunc("/1.1/statuses/retweet/20.json", func(w http.ResponseWriter, r *http.Request) {
- assertPostForm(t, map[string]string{"id": "20"}, r)
- })
-
- client := NewClient(httpClient)
- client.Statuses.Retweet(20, nil)
-}
-
-func TestStatusService_Destroy(t *testing.T) {
- httpClient, mux, server := testServer()
- defer server.Close()
-
- mux.HandleFunc("/1.1/statuses/destroy/40.json", func(w http.ResponseWriter, r *http.Request) {
- assertMethod(t, "POST", r)
- assertQuery(t, map[string]string{}, r)
- assertPostForm(t, map[string]string{"id": "40", "trim_user": "true"}, r)
- w.Header().Set("Content-Type", "application/json")
- fmt.Fprintf(w, `{"id": 40, "text": "wishing I had another sammich"}`)
- })
-
- client := NewClient(httpClient)
- params := &StatusDestroyParams{TrimUser: Bool(true)}
- tweet, _, err := client.Statuses.Destroy(40, params)
- // feed Biz Stone a sammich, he deletes sammich Tweet
- expected := &Tweet{ID: 40, Text: "wishing I had another sammich"}
- assert.Nil(t, err)
- assert.Equal(t, expected, tweet)
-}
-
-func TestStatusService_DestroyHandlesNilParams(t *testing.T) {
- httpClient, mux, server := testServer()
- defer server.Close()
-
- mux.HandleFunc("/1.1/statuses/destroy/40.json", func(w http.ResponseWriter, r *http.Request) {
- assertPostForm(t, map[string]string{"id": "40"}, r)
- })
-
- client := NewClient(httpClient)
- client.Statuses.Destroy(40, nil)
-}
-
-func TestStatusService_OEmbed(t *testing.T) {
- httpClient, mux, server := testServer()
- defer server.Close()
-
- mux.HandleFunc("/1.1/statuses/oembed.json", func(w http.ResponseWriter, r *http.Request) {
- assertMethod(t, "GET", r)
- assertQuery(t, map[string]string{"id": "691076766878691329", "maxwidth": "400", "hide_media": "true"}, r)
- w.Header().Set("Content-Type", "application/json")
- // abbreviated oEmbed response
- fmt.Fprintf(w, `{"url": "https://twitter.com/dghubble/statuses/691076766878691329", "width": 400, "html": " "}`)
- })
-
- client := NewClient(httpClient)
- params := &StatusOEmbedParams{
- ID: 691076766878691329,
- MaxWidth: 400,
- HideMedia: Bool(true),
- }
- oembed, _, err := client.Statuses.OEmbed(params)
- expected := &OEmbedTweet{
- URL: "https://twitter.com/dghubble/statuses/691076766878691329",
- Width: 400,
- HTML: " ",
- }
- assert.Nil(t, err)
- assert.Equal(t, expected, oembed)
-}
diff --git a/vendor/src/github.com/dghubble/go-twitter/twitter/stream_messages.go b/vendor/src/github.com/dghubble/go-twitter/twitter/stream_messages.go
deleted file mode 100644
index ad89bad..0000000
--- a/vendor/src/github.com/dghubble/go-twitter/twitter/stream_messages.go
+++ /dev/null
@@ -1,110 +0,0 @@
-package twitter
-
-// StatusDeletion indicates that a given Tweet has been deleted.
-// https://dev.twitter.com/streaming/overview/messages-types#status_deletion_notices_delete
-type StatusDeletion struct {
- ID int64 `json:"id"`
- IDStr string `json:"id_str"`
- UserID int64 `json:"user_id"`
- UserIDStr string `json:"user_id_str"`
-}
-
-type statusDeletionNotice struct {
- Delete struct {
- StatusDeletion *StatusDeletion `json:"status"`
- } `json:"delete"`
-}
-
-// LocationDeletion indicates geolocation data must be stripped from a range
-// of Tweets.
-// https://dev.twitter.com/streaming/overview/messages-types#Location_deletion_notices_scrub_geo
-type LocationDeletion struct {
- UserID int64 `json:"user_id"`
- UserIDStr string `json:"user_id_str"`
- UpToStatusID int64 `json:"up_to_status_id"`
- UpToStatusIDStr string `json:"up_to_status_id_str"`
-}
-
-type locationDeletionNotice struct {
- ScrubGeo *LocationDeletion `json:"scrub_geo"`
-}
-
-// StreamLimit indicates a stream matched more statuses than its rate limit
-// allowed. The track number is the number of undelivered matches.
-// https://dev.twitter.com/streaming/overview/messages-types#limit_notices
-type StreamLimit struct {
- Track int64 `json:"track"`
-}
-
-type streamLimitNotice struct {
- Limit *StreamLimit `json:"limit"`
-}
-
-// StatusWithheld indicates a Tweet with the given ID, belonging to UserId,
-// has been withheld in certain countries.
-// https://dev.twitter.com/streaming/overview/messages-types#withheld_content_notices
-type StatusWithheld struct {
- ID int64 `json:"id"`
- UserID int64 `json:"user_id"`
- WithheldInCountries []string `json:"withheld_in_countries"`
-}
-
-type statusWithheldNotice struct {
- StatusWithheld *StatusWithheld `json:"status_withheld"`
-}
-
-// UserWithheld indicates a User with the given ID has been withheld in
-// certain countries.
-// https://dev.twitter.com/streaming/overview/messages-types#withheld_content_notices
-type UserWithheld struct {
- ID int64 `json:"id"`
- WithheldInCountries []string `json:"withheld_in_countries"`
-}
-type userWithheldNotice struct {
- UserWithheld *UserWithheld `json:"user_withheld"`
-}
-
-// StreamDisconnect indicates the stream has been shutdown for some reason.
-// https://dev.twitter.com/streaming/overview/messages-types#disconnect_messages
-type StreamDisconnect struct {
- Code int64 `json:"code"`
- StreamName string `json:"stream_name"`
- Reason string `json:"reason"`
-}
-
-type streamDisconnectNotice struct {
- StreamDisconnect *StreamDisconnect `json:"disconnect"`
-}
-
-// StallWarning indicates the client is falling behind in the stream.
-// https://dev.twitter.com/streaming/overview/messages-types#stall_warnings
-type StallWarning struct {
- Code string `json:"code"`
- Message string `json:"message"`
- PercentFull int `json:"percent_full"`
-}
-
-type stallWarningNotice struct {
- StallWarning *StallWarning `json:"warning"`
-}
-
-// FriendsList is a list of some of a user's friends.
-// https://dev.twitter.com/streaming/overview/messages-types#friends_list_friends
-type FriendsList struct {
- Friends []int64 `json:"friends"`
-}
-
-type directMessageNotice struct {
- DirectMessage *DirectMessage `json:"direct_message"`
-}
-
-// Event is a non-Tweet notification message (e.g. like, retweet, follow).
-// https://dev.twitter.com/streaming/overview/messages-types#Events_event
-type Event struct {
- Event string `json:"event"`
- CreatedAt string `json:"created_at"`
- Target *User `json:"target"`
- Source *User `json:"source"`
- // TODO: add List or deprecate it
- TargetObject *Tweet `json:"target_object"`
-}
diff --git a/vendor/src/github.com/dghubble/go-twitter/twitter/stream_utils.go b/vendor/src/github.com/dghubble/go-twitter/twitter/stream_utils.go
deleted file mode 100644
index de1ede9..0000000
--- a/vendor/src/github.com/dghubble/go-twitter/twitter/stream_utils.go
+++ /dev/null
@@ -1,56 +0,0 @@
-package twitter
-
-import (
- "strings"
- "time"
-)
-
-// stopped returns true if the done channel receives, false otherwise.
-func stopped(done <-chan struct{}) bool {
- select {
- case <-done:
- return true
- default:
- return false
- }
-}
-
-// sleepOrDone pauses the current goroutine until the done channel receives
-// or until at least the duration d has elapsed, whichever comes first. This
-// is similar to time.Sleep(d), except it can be interrupted.
-func sleepOrDone(d time.Duration, done <-chan struct{}) {
- select {
- case <-time.After(d):
- return
- case <-done:
- return
- }
-}
-
-// scanLines is a split function for a Scanner that returns each line of text
-// stripped of the end-of-line marker "\r\n" used by Twitter Streaming APIs.
-// This differs from the bufio.ScanLines split function which considers the
-// '\r' optional.
-// https://dev.twitter.com/streaming/overview/processing
-func scanLines(data []byte, atEOF bool) (advance int, token []byte, err error) {
- if atEOF && len(data) == 0 {
- return 0, nil, nil
- }
- if i := strings.Index(string(data), "\r\n"); i >= 0 {
- // We have a full '\r\n' terminated line.
- return i + 2, data[0:i], nil
- }
- // If we're at EOF, we have a final, non-terminated line. Return it.
- if atEOF {
- return len(data), dropCR(data), nil
- }
- // Request more data.
- return 0, nil, nil
-}
-
-func dropCR(data []byte) []byte {
- if len(data) > 0 && data[len(data)-1] == '\n' {
- return data[0 : len(data)-1]
- }
- return data
-}
diff --git a/vendor/src/github.com/dghubble/go-twitter/twitter/stream_utils_test.go b/vendor/src/github.com/dghubble/go-twitter/twitter/stream_utils_test.go
deleted file mode 100644
index 5087d2e..0000000
--- a/vendor/src/github.com/dghubble/go-twitter/twitter/stream_utils_test.go
+++ /dev/null
@@ -1,64 +0,0 @@
-package twitter
-
-import (
- "testing"
- "time"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestStopped(t *testing.T) {
- done := make(chan struct{})
- assert.False(t, stopped(done))
- close(done)
- assert.True(t, stopped(done))
-}
-
-func TestSleepOrDone_Sleep(t *testing.T) {
- wait := time.Nanosecond * 20
- done := make(chan struct{})
- completed := make(chan struct{})
- go func() {
- sleepOrDone(wait, done)
- close(completed)
- }()
- // wait for goroutine SleepOrDone to sleep
- assertDone(t, completed, defaultTestTimeout)
-}
-
-func TestSleepOrDone_Done(t *testing.T) {
- wait := time.Second * 5
- done := make(chan struct{})
- completed := make(chan struct{})
- go func() {
- sleepOrDone(wait, done)
- close(completed)
- }()
- // close done, interrupting SleepOrDone
- close(done)
- // assert that SleepOrDone exited, closing completed
- assertDone(t, completed, defaultTestTimeout)
-}
-
-func TestScanLines(t *testing.T) {
- cases := []struct {
- input []byte
- atEOF bool
- advance int
- token []byte
- }{
- {[]byte("Line 1\r\n"), false, 8, []byte("Line 1")},
- {[]byte("Line 1\n"), false, 0, nil},
- {[]byte("Line 1"), false, 0, nil},
- {[]byte(""), false, 0, nil},
- {[]byte("Line 1\r\n"), true, 8, []byte("Line 1")},
- {[]byte("Line 1\n"), true, 7, []byte("Line 1")},
- {[]byte("Line 1"), true, 6, []byte("Line 1")},
- {[]byte(""), true, 0, nil},
- }
- for _, c := range cases {
- advance, token, _ := scanLines(c.input, c.atEOF)
- assert.Equal(t, c.advance, advance)
- assert.Equal(t, c.token, token)
- }
-}
diff --git a/vendor/src/github.com/dghubble/go-twitter/twitter/streams.go b/vendor/src/github.com/dghubble/go-twitter/twitter/streams.go
deleted file mode 100644
index 2864971..0000000
--- a/vendor/src/github.com/dghubble/go-twitter/twitter/streams.go
+++ /dev/null
@@ -1,326 +0,0 @@
-package twitter
-
-import (
- "bufio"
- "encoding/json"
- "io"
- "net/http"
- "sync"
- "time"
-
- "github.com/cenkalti/backoff"
- "github.com/dghubble/sling"
-)
-
-const (
- userAgent = "go-twitter v0.1"
- publicStream = "https://stream.twitter.com/1.1/"
- userStream = "https://userstream.twitter.com/1.1/"
- siteStream = "https://sitestream.twitter.com/1.1/"
-)
-
-// StreamService provides methods for accessing the Twitter Streaming API.
-type StreamService struct {
- client *http.Client
- public *sling.Sling
- user *sling.Sling
- site *sling.Sling
-}
-
-// newStreamService returns a new StreamService.
-func newStreamService(client *http.Client, sling *sling.Sling) *StreamService {
- sling.Set("User-Agent", userAgent)
- return &StreamService{
- client: client,
- public: sling.New().Base(publicStream).Path("statuses/"),
- user: sling.New().Base(userStream),
- site: sling.New().Base(siteStream),
- }
-}
-
-// StreamFilterParams are parameters for StreamService.Filter.
-type StreamFilterParams struct {
- FilterLevel string `url:"filter_level,omitempty"`
- Follow []string `url:"follow,omitempty,comma"`
- Language []string `url:"language,omitempty,comma"`
- Locations []string `url:"locations,omitempty,comma"`
- StallWarnings *bool `url:"stall_warnings,omitempty"`
- Track []string `url:"track,omitempty,comma"`
-}
-
-// Filter returns messages that match one or more filter predicates.
-// https://dev.twitter.com/streaming/reference/post/statuses/filter
-func (srv *StreamService) Filter(params *StreamFilterParams) (*Stream, error) {
- req, err := srv.public.New().Post("filter.json").QueryStruct(params).Request()
- if err != nil {
- return nil, err
- }
- return newStream(srv.client, req), nil
-}
-
-// StreamSampleParams are the parameters for StreamService.Sample.
-type StreamSampleParams struct {
- StallWarnings *bool `url:"stall_warnings,omitempty"`
-}
-
-// Sample returns a small sample of public stream messages.
-// https://dev.twitter.com/streaming/reference/get/statuses/sample
-func (srv *StreamService) Sample(params *StreamSampleParams) (*Stream, error) {
- req, err := srv.public.New().Get("sample.json").QueryStruct(params).Request()
- if err != nil {
- return nil, err
- }
- return newStream(srv.client, req), nil
-}
-
-// StreamUserParams are the parameters for StreamService.User.
-type StreamUserParams struct {
- FilterLevel string `url:"filter_level,omitempty"`
- Language []string `url:"language,omitempty,comma"`
- Locations []string `url:"locations,omitempty,comma"`
- Replies string `url:"replies,omitempty"`
- StallWarnings *bool `url:"stall_warnings,omitempty"`
- Track []string `url:"track,omitempty,comma"`
- With string `url:"with,omitempty"`
-}
-
-// User returns a stream of messages specific to the authenticated User.
-// https://dev.twitter.com/streaming/reference/get/user
-func (srv *StreamService) User(params *StreamUserParams) (*Stream, error) {
- req, err := srv.user.New().Get("user.json").QueryStruct(params).Request()
- if err != nil {
- return nil, err
- }
- return newStream(srv.client, req), nil
-}
-
-// StreamSiteParams are the parameters for StreamService.Site.
-type StreamSiteParams struct {
- FilterLevel string `url:"filter_level,omitempty"`
- Follow []string `url:"follow,omitempty,comma"`
- Language []string `url:"language,omitempty,comma"`
- Replies string `url:"replies,omitempty"`
- StallWarnings *bool `url:"stall_warnings,omitempty"`
- With string `url:"with,omitempty"`
-}
-
-// Site returns messages for a set of users.
-// Requires special permission to access.
-// https://dev.twitter.com/streaming/reference/get/site
-func (srv *StreamService) Site(params *StreamSiteParams) (*Stream, error) {
- req, err := srv.site.New().Get("site.json").QueryStruct(params).Request()
- if err != nil {
- return nil, err
- }
- return newStream(srv.client, req), nil
-}
-
-// StreamFirehoseParams are the parameters for StreamService.Firehose.
-type StreamFirehoseParams struct {
- Count int `url:"count,omitempty"`
- FilterLevel string `url:"filter_level,omitempty"`
- Language []string `url:"language,omitempty,comma"`
- StallWarnings *bool `url:"stall_warnings,omitempty"`
-}
-
-// Firehose returns all public messages and statuses.
-// Requires special permission to access.
-// https://dev.twitter.com/streaming/reference/get/statuses/firehose
-func (srv *StreamService) Firehose(params *StreamFirehoseParams) (*Stream, error) {
- req, err := srv.public.New().Get("firehose.json").QueryStruct(params).Request()
- if err != nil {
- return nil, err
- }
- return newStream(srv.client, req), nil
-}
-
-// Stream maintains a connection to the Twitter Streaming API, receives
-// messages from the streaming response, and sends them on the Messages
-// channel from a goroutine. The stream goroutine stops itself if an EOF is
-// reached or retry errors occur, also closing the Messages channel.
-//
-// The client must Stop() the stream when finished receiving, which will
-// wait until the stream is properly stopped.
-type Stream struct {
- client *http.Client
- Messages chan interface{}
- done chan struct{}
- group *sync.WaitGroup
- body io.Closer
-}
-
-// newStream creates a Stream and starts a goroutine to retry connecting and
-// receive from a stream response. The goroutine may stop due to retry errors
-// or be stopped by calling Stop() on the stream.
-func newStream(client *http.Client, req *http.Request) *Stream {
- s := &Stream{
- client: client,
- Messages: make(chan interface{}),
- done: make(chan struct{}),
- group: &sync.WaitGroup{},
- }
- s.group.Add(1)
- go s.retry(req, newExponentialBackOff(), newAggressiveExponentialBackOff())
- return s
-}
-
-// Stop signals retry and receiver to stop, closes the Messages channel, and
-// blocks until done.
-func (s *Stream) Stop() {
- close(s.done)
- // Scanner does not have a Stop() or take a done channel, so for low volume
- // streams Scan() blocks until the next keep-alive. Close the resp.Body to
- // escape and stop the stream in a timely fashion.
- if s.body != nil {
- s.body.Close()
- }
- // block until the retry goroutine stops
- s.group.Wait()
-}
-
-// retry retries making the given http.Request and receiving the response
-// according to the Twitter backoff policies. Callers should invoke in a
-// goroutine since backoffs sleep between retries.
-// https://dev.twitter.com/streaming/overview/connecting
-func (s *Stream) retry(req *http.Request, expBackOff backoff.BackOff, aggExpBackOff backoff.BackOff) {
- // close Messages channel and decrement the wait group counter
- defer close(s.Messages)
- defer s.group.Done()
-
- var wait time.Duration
- for !stopped(s.done) {
- resp, err := s.client.Do(req)
- if err != nil {
- // stop retrying for HTTP protocol errors
- s.Messages <- err
- return
- }
- // when err is nil, resp contains a non-nil Body which must be closed
- defer resp.Body.Close()
- s.body = resp.Body
- switch resp.StatusCode {
- case 200:
- // receive stream response Body, handles closing
- s.receive(resp.Body)
- expBackOff.Reset()
- aggExpBackOff.Reset()
- case 503:
- // exponential backoff
- wait = expBackOff.NextBackOff()
- case 420, 429:
- // aggressive exponential backoff
- wait = aggExpBackOff.NextBackOff()
- default:
- // stop retrying for other response codes
- resp.Body.Close()
- return
- }
- // close response before each retry
- resp.Body.Close()
- if wait == backoff.Stop {
- return
- }
- sleepOrDone(wait, s.done)
- }
-}
-
-// receive scans a stream response body, JSON decodes tokens to messages, and
-// sends messages to the Messages channel. Receiving continues until an EOF,
-// scan error, or the done channel is closed.
-func (s *Stream) receive(body io.ReadCloser) {
- defer body.Close()
- // A bufio.Scanner steps through 'tokens' of data on each Scan() using a
- // SplitFunc. SplitFunc tokenizes input bytes to return the number of bytes
- // to advance, the token slice of bytes, and any errors.
- scanner := bufio.NewScanner(body)
- // default ScanLines SplitFunc is incorrect for Twitter Streams, set custom
- scanner.Split(scanLines)
- for !stopped(s.done) && scanner.Scan() {
- token := scanner.Bytes()
- if len(token) == 0 {
- // empty keep-alive
- continue
- }
- select {
- // send messages, data, or errors
- case s.Messages <- getMessage(token):
- continue
- // allow client to Stop(), even if not receiving
- case <-s.done:
- return
- }
- }
-}
-
-// getMessage unmarshals the token and returns a message struct, if the type
-// can be determined. Otherwise, returns the token unmarshalled into a data
-// map[string]interface{} or the unmarshal error.
-func getMessage(token []byte) interface{} {
- var data map[string]interface{}
- // unmarshal JSON encoded token into a map for
- err := json.Unmarshal(token, &data)
- if err != nil {
- return err
- }
- return decodeMessage(token, data)
-}
-
-// decodeMessage determines the message type from known data keys, allocates
-// at most one message struct, and JSON decodes the token into the message.
-// Returns the message struct or the data map if the message type could not be
-// determined.
-func decodeMessage(token []byte, data map[string]interface{}) interface{} {
- if hasPath(data, "retweet_count") {
- tweet := new(Tweet)
- json.Unmarshal(token, tweet)
- return tweet
- } else if hasPath(data, "direct_message") {
- notice := new(directMessageNotice)
- json.Unmarshal(token, notice)
- return notice.DirectMessage
- } else if hasPath(data, "delete") {
- notice := new(statusDeletionNotice)
- json.Unmarshal(token, notice)
- return notice.Delete.StatusDeletion
- } else if hasPath(data, "scrub_geo") {
- notice := new(locationDeletionNotice)
- json.Unmarshal(token, notice)
- return notice.ScrubGeo
- } else if hasPath(data, "limit") {
- notice := new(streamLimitNotice)
- json.Unmarshal(token, notice)
- return notice.Limit
- } else if hasPath(data, "status_withheld") {
- notice := new(statusWithheldNotice)
- json.Unmarshal(token, notice)
- return notice.StatusWithheld
- } else if hasPath(data, "user_withheld") {
- notice := new(userWithheldNotice)
- json.Unmarshal(token, notice)
- return notice.UserWithheld
- } else if hasPath(data, "disconnect") {
- notice := new(streamDisconnectNotice)
- json.Unmarshal(token, notice)
- return notice.StreamDisconnect
- } else if hasPath(data, "warning") {
- notice := new(stallWarningNotice)
- json.Unmarshal(token, notice)
- return notice.StallWarning
- } else if hasPath(data, "friends") {
- friendsList := new(FriendsList)
- json.Unmarshal(token, friendsList)
- return friendsList
- } else if hasPath(data, "event") {
- event := new(Event)
- json.Unmarshal(token, event)
- return event
- }
- // message type unknown, return the data map[string]interface{}
- return data
-}
-
-// hasPath returns true if the map contains the given key, false otherwise.
-func hasPath(data map[string]interface{}, key string) bool {
- _, ok := data[key]
- return ok
-}
diff --git a/vendor/src/github.com/dghubble/go-twitter/twitter/streams_test.go b/vendor/src/github.com/dghubble/go-twitter/twitter/streams_test.go
deleted file mode 100644
index a56bf8e..0000000
--- a/vendor/src/github.com/dghubble/go-twitter/twitter/streams_test.go
+++ /dev/null
@@ -1,352 +0,0 @@
-package twitter
-
-import (
- "fmt"
- "net/http"
- "sync"
- "testing"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestStream_MessageJSONError(t *testing.T) {
- badJSON := []byte(`{`)
- msg := getMessage(badJSON)
- assert.EqualError(t, msg.(error), "unexpected end of JSON input")
-}
-
-func TestStream_GetMessageTweet(t *testing.T) {
- msgJSON := []byte(`{"id": 20, "text": "just setting up my twttr", "retweet_count": "68535"}`)
- msg := getMessage(msgJSON)
- assert.IsType(t, &Tweet{}, msg)
-}
-
-func TestStream_GetMessageDirectMessage(t *testing.T) {
- msgJSON := []byte(`{"direct_message": {"id": 666024290140217347}}`)
- msg := getMessage(msgJSON)
- assert.IsType(t, &DirectMessage{}, msg)
-}
-
-func TestStream_GetMessageDelete(t *testing.T) {
- msgJSON := []byte(`{"delete": { "id": 20}}`)
- msg := getMessage(msgJSON)
- assert.IsType(t, &StatusDeletion{}, msg)
-}
-
-func TestStream_GetMessageLocationDeletion(t *testing.T) {
- msgJSON := []byte(`{"scrub_geo": { "up_to_status_id": 20}}`)
- msg := getMessage(msgJSON)
- assert.IsType(t, &LocationDeletion{}, msg)
-}
-
-func TestStream_GetMessageStreamLimit(t *testing.T) {
- msgJSON := []byte(`{"limit": { "track": 10 }}`)
- msg := getMessage(msgJSON)
- assert.IsType(t, &StreamLimit{}, msg)
-}
-
-func TestStream_StatusWithheld(t *testing.T) {
- msgJSON := []byte(`{"status_withheld": { "id": 20, "user_id": 12, "withheld_in_countries":["USA", "China"] }}`)
- msg := getMessage(msgJSON)
- assert.IsType(t, &StatusWithheld{}, msg)
-}
-
-func TestStream_UserWithheld(t *testing.T) {
- msgJSON := []byte(`{"user_withheld": { "id": 12, "withheld_in_countries":["USA", "China"] }}`)
- msg := getMessage(msgJSON)
- assert.IsType(t, &UserWithheld{}, msg)
-}
-
-func TestStream_StreamDisconnect(t *testing.T) {
- msgJSON := []byte(`{"disconnect": { "code": "420", "stream_name": "streaming stuff", "reason": "too many connections" }}`)
- msg := getMessage(msgJSON)
- assert.IsType(t, &StreamDisconnect{}, msg)
-}
-
-func TestStream_StallWarning(t *testing.T) {
- msgJSON := []byte(`{"warning": { "code": "420", "percent_full": 90, "message": "a lot of messages" }}`)
- msg := getMessage(msgJSON)
- assert.IsType(t, &StallWarning{}, msg)
-}
-
-func TestStream_FriendsList(t *testing.T) {
- msgJSON := []byte(`{"friends": [666024290140217347, 666024290140217349, 666024290140217342]}`)
- msg := getMessage(msgJSON)
- assert.IsType(t, &FriendsList{}, msg)
-}
-
-func TestStream_Event(t *testing.T) {
- msgJSON := []byte(`{"event": "block", "target": {"name": "XKCD Comic", "favourites_count": 2}, "source": {"name": "XKCD Comic2", "favourites_count": 3}, "created_at": "Sat Sep 4 16:10:54 +0000 2010"}`)
- msg := getMessage(msgJSON)
- assert.IsType(t, &Event{}, msg)
-}
-
-func TestStream_Unknown(t *testing.T) {
- msgJSON := []byte(`{"unknown_data": {"new_twitter_type":"unexpected"}}`)
- msg := getMessage(msgJSON)
- assert.IsType(t, map[string]interface{}{}, msg)
-}
-
-func TestStream_Filter(t *testing.T) {
- httpClient, mux, server := testServer()
- defer server.Close()
-
- reqCount := 0
- mux.HandleFunc("/1.1/statuses/filter.json", func(w http.ResponseWriter, r *http.Request) {
- assertMethod(t, "POST", r)
- assertQuery(t, map[string]string{"track": "gophercon,golang"}, r)
- switch reqCount {
- case 0:
- w.Header().Set("Content-Type", "application/json")
- w.Header().Set("Transfer-Encoding", "chunked")
- fmt.Fprintf(w,
- `{"text": "Gophercon talks!"}`+"\r\n"+
- `{"text": "Gophercon super talks!"}`+"\r\n",
- )
- default:
- // Only allow first request
- http.Error(w, "Stream API not available!", 130)
- }
- reqCount++
- })
-
- counts := &counter{}
- demux := newCounterDemux(counts)
- client := NewClient(httpClient)
- streamFilterParams := &StreamFilterParams{
- Track: []string{"gophercon", "golang"},
- }
- stream, err := client.Streams.Filter(streamFilterParams)
- // assert that the expected messages are received
- assert.NoError(t, err)
- defer stream.Stop()
- for message := range stream.Messages {
- demux.Handle(message)
- }
- expectedCounts := &counter{all: 2, other: 2}
- assert.Equal(t, expectedCounts, counts)
-}
-
-func TestStream_Sample(t *testing.T) {
- httpClient, mux, server := testServer()
- defer server.Close()
-
- reqCount := 0
- mux.HandleFunc("/1.1/statuses/sample.json", func(w http.ResponseWriter, r *http.Request) {
- assertMethod(t, "GET", r)
- assertQuery(t, map[string]string{"stall_warnings": "true"}, r)
- switch reqCount {
- case 0:
- w.Header().Set("Content-Type", "application/json")
- w.Header().Set("Transfer-Encoding", "chunked")
- fmt.Fprintf(w,
- `{"text": "Gophercon talks!"}`+"\r\n"+
- `{"text": "Gophercon super talks!"}`+"\r\n",
- )
- default:
- // Only allow first request
- http.Error(w, "Stream API not available!", 130)
- }
- reqCount++
- })
-
- counts := &counter{}
- demux := newCounterDemux(counts)
- client := NewClient(httpClient)
- streamSampleParams := &StreamSampleParams{
- StallWarnings: Bool(true),
- }
- stream, err := client.Streams.Sample(streamSampleParams)
- // assert that the expected messages are received
- assert.NoError(t, err)
- defer stream.Stop()
- for message := range stream.Messages {
- demux.Handle(message)
- }
- expectedCounts := &counter{all: 2, other: 2}
- assert.Equal(t, expectedCounts, counts)
-}
-
-func TestStream_User(t *testing.T) {
- httpClient, mux, server := testServer()
- defer server.Close()
-
- reqCount := 0
- mux.HandleFunc("/1.1/user.json", func(w http.ResponseWriter, r *http.Request) {
- assertMethod(t, "GET", r)
- assertQuery(t, map[string]string{"stall_warnings": "true", "with": "followings"}, r)
- switch reqCount {
- case 0:
- w.Header().Set("Content-Type", "application/json")
- w.Header().Set("Transfer-Encoding", "chunked")
- fmt.Fprintf(w, `{"friends": [666024290140217347, 666024290140217349, 666024290140217342]}`+"\r\n"+"\r\n")
- default:
- // Only allow first request
- http.Error(w, "Stream API not available!", 130)
- }
- reqCount++
- })
-
- counts := &counter{}
- demux := newCounterDemux(counts)
- client := NewClient(httpClient)
- streamUserParams := &StreamUserParams{
- StallWarnings: Bool(true),
- With: "followings",
- }
- stream, err := client.Streams.User(streamUserParams)
- // assert that the expected messages are received
- assert.NoError(t, err)
- defer stream.Stop()
- for message := range stream.Messages {
- demux.Handle(message)
- }
- expectedCounts := &counter{all: 1, friendsList: 1}
- assert.Equal(t, expectedCounts, counts)
-}
-
-func TestStream_Site(t *testing.T) {
- httpClient, mux, server := testServer()
- defer server.Close()
-
- reqCount := 0
- mux.HandleFunc("/1.1/site.json", func(w http.ResponseWriter, r *http.Request) {
- assertMethod(t, "GET", r)
- assertQuery(t, map[string]string{"follow": "666024290140217347,666024290140217349"}, r)
- switch reqCount {
- case 0:
- w.Header().Set("Content-Type", "application/json")
- w.Header().Set("Transfer-Encoding", "chunked")
- fmt.Fprintf(w,
- `{"text": "Gophercon talks!"}`+"\r\n"+
- `{"text": "Gophercon super talks!"}`+"\r\n",
- )
- default:
- // Only allow first request
- http.Error(w, "Stream API not available!", 130)
- }
- reqCount++
- })
-
- counts := &counter{}
- demux := newCounterDemux(counts)
- client := NewClient(httpClient)
- streamSiteParams := &StreamSiteParams{
- Follow: []string{"666024290140217347", "666024290140217349"},
- }
- stream, err := client.Streams.Site(streamSiteParams)
- // assert that the expected messages are received
- assert.NoError(t, err)
- defer stream.Stop()
- for message := range stream.Messages {
- demux.Handle(message)
- }
- expectedCounts := &counter{all: 2, other: 2}
- assert.Equal(t, expectedCounts, counts)
-}
-
-func TestStream_PublicFirehose(t *testing.T) {
- httpClient, mux, server := testServer()
- defer server.Close()
-
- reqCount := 0
- mux.HandleFunc("/1.1/statuses/firehose.json", func(w http.ResponseWriter, r *http.Request) {
- assertMethod(t, "GET", r)
- assertQuery(t, map[string]string{"count": "100"}, r)
- switch reqCount {
- case 0:
- w.Header().Set("Content-Type", "application/json")
- w.Header().Set("Transfer-Encoding", "chunked")
- fmt.Fprintf(w,
- `{"text": "Gophercon talks!"}`+"\r\n"+
- `{"text": "Gophercon super talks!"}`+"\r\n",
- )
- default:
- // Only allow first request
- http.Error(w, "Stream API not available!", 130)
- }
- reqCount++
- })
-
- counts := &counter{}
- demux := newCounterDemux(counts)
- client := NewClient(httpClient)
- streamFirehoseParams := &StreamFirehoseParams{
- Count: 100,
- }
- stream, err := client.Streams.Firehose(streamFirehoseParams)
- // assert that the expected messages are received
- assert.NoError(t, err)
- defer stream.Stop()
- for message := range stream.Messages {
- demux.Handle(message)
- }
- expectedCounts := &counter{all: 2, other: 2}
- assert.Equal(t, expectedCounts, counts)
-}
-
-func TestStreamRetry_ExponentialBackoff(t *testing.T) {
- httpClient, mux, server := testServer()
- defer server.Close()
-
- reqCount := 0
- mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
- switch reqCount {
- case 0:
- http.Error(w, "Service Unavailable", 503)
- default:
- // Only allow first request
- http.Error(w, "Stream API not available!", 130)
- }
- reqCount++
- })
- stream := &Stream{
- client: httpClient,
- Messages: make(chan interface{}),
- done: make(chan struct{}),
- group: &sync.WaitGroup{},
- }
- stream.group.Add(1)
- req, _ := http.NewRequest("GET", "http://example.com/", nil)
- expBackoff := &BackOffRecorder{}
- // receive messages and throw them away
- go NewSwitchDemux().HandleChan(stream.Messages)
- stream.retry(req, expBackoff, nil)
- defer stream.Stop()
- // assert exponential backoff in response to 503
- assert.Equal(t, 1, expBackoff.Count)
-}
-
-func TestStreamRetry_AggressiveBackoff(t *testing.T) {
- httpClient, mux, server := testServer()
- defer server.Close()
-
- reqCount := 0
- mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
- switch reqCount {
- case 0:
- http.Error(w, "Enhance Your Calm", 420)
- case 1:
- http.Error(w, "Too Many Requests", 429)
- default:
- // Only allow first request
- http.Error(w, "Stream API not available!", 130)
- }
- reqCount++
- })
- stream := &Stream{
- client: httpClient,
- Messages: make(chan interface{}),
- done: make(chan struct{}),
- group: &sync.WaitGroup{},
- }
- stream.group.Add(1)
- req, _ := http.NewRequest("GET", "http://example.com/", nil)
- aggExpBackoff := &BackOffRecorder{}
- // receive messages and throw them away
- go NewSwitchDemux().HandleChan(stream.Messages)
- stream.retry(req, nil, aggExpBackoff)
- defer stream.Stop()
- // assert aggressive exponential backoff in response to 420 and 429
- assert.Equal(t, 2, aggExpBackoff.Count)
-}
diff --git a/vendor/src/github.com/dghubble/go-twitter/twitter/timelines.go b/vendor/src/github.com/dghubble/go-twitter/twitter/timelines.go
deleted file mode 100644
index f2ada6d..0000000
--- a/vendor/src/github.com/dghubble/go-twitter/twitter/timelines.go
+++ /dev/null
@@ -1,106 +0,0 @@
-package twitter
-
-import (
- "net/http"
-
- "github.com/dghubble/sling"
-)
-
-// TimelineService provides methods for accessing Twitter status timeline
-// API endpoints.
-type TimelineService struct {
- sling *sling.Sling
-}
-
-// newTimelineService returns a new TimelineService.
-func newTimelineService(sling *sling.Sling) *TimelineService {
- return &TimelineService{
- sling: sling.Path("statuses/"),
- }
-}
-
-// UserTimelineParams are the parameters for TimelineService.UserTimeline.
-type UserTimelineParams struct {
- UserID int64 `url:"user_id,omitempty"`
- ScreenName string `url:"screen_name,omitempty"`
- Count int `url:"count,omitempty"`
- SinceID int64 `url:"since_id,omitempty"`
- MaxID int64 `url:"max_id,omitempty"`
- TrimUser *bool `url:"trim_user,omitempty"`
- ExcludeReplies *bool `url:"exclude_replies,omitempty"`
- ContributorDetails *bool `url:"contributor_details,omitempty"`
- IncludeRetweets *bool `url:"include_rts,omitempty"`
-}
-
-// UserTimeline returns recent Tweets from the specified user.
-// https://dev.twitter.com/rest/reference/get/statuses/user_timeline
-func (s *TimelineService) UserTimeline(params *UserTimelineParams) ([]Tweet, *http.Response, error) {
- tweets := new([]Tweet)
- apiError := new(APIError)
- resp, err := s.sling.New().Get("user_timeline.json").QueryStruct(params).Receive(tweets, apiError)
- return *tweets, resp, relevantError(err, *apiError)
-}
-
-// HomeTimelineParams are the parameters for TimelineService.HomeTimeline.
-type HomeTimelineParams struct {
- Count int `url:"count,omitempty"`
- SinceID int64 `url:"since_id,omitempty"`
- MaxID int64 `url:"max_id,omitempty"`
- TrimUser *bool `url:"trim_user,omitempty"`
- ExcludeReplies *bool `url:"exclude_replies,omitempty"`
- ContributorDetails *bool `url:"contributor_details,omitempty"`
- IncludeEntities *bool `url:"include_entities,omitempty"`
-}
-
-// HomeTimeline returns recent Tweets and retweets from the user and those
-// users they follow.
-// Requires a user auth context.
-// https://dev.twitter.com/rest/reference/get/statuses/home_timeline
-func (s *TimelineService) HomeTimeline(params *HomeTimelineParams) ([]Tweet, *http.Response, error) {
- tweets := new([]Tweet)
- apiError := new(APIError)
- resp, err := s.sling.New().Get("home_timeline.json").QueryStruct(params).Receive(tweets, apiError)
- return *tweets, resp, relevantError(err, *apiError)
-}
-
-// MentionTimelineParams are the parameters for TimelineService.MentionTimeline.
-type MentionTimelineParams struct {
- Count int `url:"count,omitempty"`
- SinceID int64 `url:"since_id,omitempty"`
- MaxID int64 `url:"max_id,omitempty"`
- TrimUser *bool `url:"trim_user,omitempty"`
- ContributorDetails *bool `url:"contributor_details,omitempty"`
- IncludeEntities *bool `url:"include_entities,omitempty"`
-}
-
-// MentionTimeline returns recent Tweet mentions of the authenticated user.
-// Requires a user auth context.
-// https://dev.twitter.com/rest/reference/get/statuses/mentions_timeline
-func (s *TimelineService) MentionTimeline(params *MentionTimelineParams) ([]Tweet, *http.Response, error) {
- tweets := new([]Tweet)
- apiError := new(APIError)
- resp, err := s.sling.New().Get("mentions_timeline.json").QueryStruct(params).Receive(tweets, apiError)
- return *tweets, resp, relevantError(err, *apiError)
-}
-
-// RetweetsOfMeTimelineParams are the parameters for
-// TimelineService.RetweetsOfMeTimeline.
-type RetweetsOfMeTimelineParams struct {
- Count int `url:"count,omitempty"`
- SinceID int64 `url:"since_id,omitempty"`
- MaxID int64 `url:"max_id,omitempty"`
- TrimUser *bool `url:"trim_user,omitempty"`
- IncludeEntities *bool `url:"include_entities,omitempty"`
- IncludeUserEntities *bool `url:"include_user_entities"`
-}
-
-// RetweetsOfMeTimeline returns the most recent Tweets by the authenticated
-// user that have been retweeted by others.
-// Requires a user auth context.
-// https://dev.twitter.com/rest/reference/get/statuses/retweets_of_me
-func (s *TimelineService) RetweetsOfMeTimeline(params *RetweetsOfMeTimelineParams) ([]Tweet, *http.Response, error) {
- tweets := new([]Tweet)
- apiError := new(APIError)
- resp, err := s.sling.New().Get("retweets_of_me.json").QueryStruct(params).Receive(tweets, apiError)
- return *tweets, resp, relevantError(err, *apiError)
-}
diff --git a/vendor/src/github.com/dghubble/go-twitter/twitter/timelines_test.go b/vendor/src/github.com/dghubble/go-twitter/twitter/timelines_test.go
deleted file mode 100644
index 40550c9..0000000
--- a/vendor/src/github.com/dghubble/go-twitter/twitter/timelines_test.go
+++ /dev/null
@@ -1,81 +0,0 @@
-package twitter
-
-import (
- "fmt"
- "net/http"
- "testing"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestTimelineService_UserTimeline(t *testing.T) {
- httpClient, mux, server := testServer()
- defer server.Close()
-
- mux.HandleFunc("/1.1/statuses/user_timeline.json", func(w http.ResponseWriter, r *http.Request) {
- assertMethod(t, "GET", r)
- assertQuery(t, map[string]string{"user_id": "113419064", "trim_user": "true", "include_rts": "false"}, r)
- w.Header().Set("Content-Type", "application/json")
- fmt.Fprintf(w, `[{"text": "Gophercon talks!"}, {"text": "Why gophers are so adorable"}]`)
- })
-
- client := NewClient(httpClient)
- tweets, _, err := client.Timelines.UserTimeline(&UserTimelineParams{UserID: 113419064, TrimUser: Bool(true), IncludeRetweets: Bool(false)})
- expected := []Tweet{Tweet{Text: "Gophercon talks!"}, Tweet{Text: "Why gophers are so adorable"}}
- assert.Nil(t, err)
- assert.Equal(t, expected, tweets)
-}
-
-func TestTimelineService_HomeTimeline(t *testing.T) {
- httpClient, mux, server := testServer()
- defer server.Close()
-
- mux.HandleFunc("/1.1/statuses/home_timeline.json", func(w http.ResponseWriter, r *http.Request) {
- assertMethod(t, "GET", r)
- assertQuery(t, map[string]string{"since_id": "589147592367431680", "exclude_replies": "false"}, r)
- w.Header().Set("Content-Type", "application/json")
- fmt.Fprintf(w, `[{"text": "Live on #Periscope"}, {"text": "Clickbait journalism"}, {"text": "Useful announcement"}]`)
- })
-
- client := NewClient(httpClient)
- tweets, _, err := client.Timelines.HomeTimeline(&HomeTimelineParams{SinceID: 589147592367431680, ExcludeReplies: Bool(false)})
- expected := []Tweet{Tweet{Text: "Live on #Periscope"}, Tweet{Text: "Clickbait journalism"}, Tweet{Text: "Useful announcement"}}
- assert.Nil(t, err)
- assert.Equal(t, expected, tweets)
-}
-
-func TestTimelineService_MentionTimeline(t *testing.T) {
- httpClient, mux, server := testServer()
- defer server.Close()
-
- mux.HandleFunc("/1.1/statuses/mentions_timeline.json", func(w http.ResponseWriter, r *http.Request) {
- assertMethod(t, "GET", r)
- assertQuery(t, map[string]string{"count": "20", "include_entities": "false"}, r)
- w.Header().Set("Content-Type", "application/json")
- fmt.Fprintf(w, `[{"text": "@dghubble can I get verified?"}, {"text": "@dghubble why are gophers so great?"}]`)
- })
-
- client := NewClient(httpClient)
- tweets, _, err := client.Timelines.MentionTimeline(&MentionTimelineParams{Count: 20, IncludeEntities: Bool(false)})
- expected := []Tweet{Tweet{Text: "@dghubble can I get verified?"}, Tweet{Text: "@dghubble why are gophers so great?"}}
- assert.Nil(t, err)
- assert.Equal(t, expected, tweets)
-}
-
-func TestTimelineService_RetweetsOfMeTimeline(t *testing.T) {
- httpClient, mux, server := testServer()
- defer server.Close()
-
- mux.HandleFunc("/1.1/statuses/retweets_of_me.json", func(w http.ResponseWriter, r *http.Request) {
- assertMethod(t, "GET", r)
- assertQuery(t, map[string]string{"trim_user": "false", "include_user_entities": "false"}, r)
- w.Header().Set("Content-Type", "application/json")
- fmt.Fprintf(w, `[{"text": "RT Twitter UK edition"}, {"text": "RT Triply-replicated Gophers"}]`)
- })
-
- client := NewClient(httpClient)
- tweets, _, err := client.Timelines.RetweetsOfMeTimeline(&RetweetsOfMeTimelineParams{TrimUser: Bool(false), IncludeUserEntities: Bool(false)})
- expected := []Tweet{Tweet{Text: "RT Twitter UK edition"}, Tweet{Text: "RT Triply-replicated Gophers"}}
- assert.Nil(t, err)
- assert.Equal(t, expected, tweets)
-}
diff --git a/vendor/src/github.com/dghubble/go-twitter/twitter/twitter.go b/vendor/src/github.com/dghubble/go-twitter/twitter/twitter.go
deleted file mode 100644
index 01f53a2..0000000
--- a/vendor/src/github.com/dghubble/go-twitter/twitter/twitter.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package twitter
-
-import (
- "net/http"
-
- "github.com/dghubble/sling"
-)
-
-const twitterAPI = "https://api.twitter.com/1.1/"
-
-// Client is a Twitter client for making Twitter API requests.
-type Client struct {
- sling *sling.Sling
- // Twitter API Services
- Accounts *AccountService
- Statuses *StatusService
- Timelines *TimelineService
- Users *UserService
- Followers *FollowerService
- DirectMessages *DirectMessageService
- Streams *StreamService
-}
-
-// NewClient returns a new Client.
-func NewClient(httpClient *http.Client) *Client {
- base := sling.New().Client(httpClient).Base(twitterAPI)
- return &Client{
- sling: base,
- Accounts: newAccountService(base.New()),
- Statuses: newStatusService(base.New()),
- Timelines: newTimelineService(base.New()),
- Users: newUserService(base.New()),
- Followers: newFollowerService(base.New()),
- DirectMessages: newDirectMessageService(base.New()),
- Streams: newStreamService(httpClient, base.New()),
- }
-}
-
-// Bool returns a new pointer to the given bool value.
-func Bool(v bool) *bool {
- ptr := new(bool)
- *ptr = v
- return ptr
-}
-
-// Float returns a new pointer to the given float64 value.
-func Float(v float64) *float64 {
- ptr := new(float64)
- *ptr = v
- return ptr
-}
diff --git a/vendor/src/github.com/dghubble/go-twitter/twitter/twitter_test.go b/vendor/src/github.com/dghubble/go-twitter/twitter/twitter_test.go
deleted file mode 100644
index 4b7ca29..0000000
--- a/vendor/src/github.com/dghubble/go-twitter/twitter/twitter_test.go
+++ /dev/null
@@ -1,93 +0,0 @@
-package twitter
-
-import (
- "net/http"
- "net/http/httptest"
- "net/url"
- "testing"
- "time"
-
- "github.com/stretchr/testify/assert"
-)
-
-var defaultTestTimeout = time.Second * 1
-
-// testServer returns an http Client, ServeMux, and Server. The client proxies
-// requests to the server and handlers can be registered on the mux to handle
-// requests. The caller must close the test server.
-func testServer() (*http.Client, *http.ServeMux, *httptest.Server) {
- mux := http.NewServeMux()
- server := httptest.NewServer(mux)
- transport := &RewriteTransport{&http.Transport{
- Proxy: func(req *http.Request) (*url.URL, error) {
- return url.Parse(server.URL)
- },
- }}
- client := &http.Client{Transport: transport}
- return client, mux, server
-}
-
-// RewriteTransport rewrites https requests to http to avoid TLS cert issues
-// during testing.
-type RewriteTransport struct {
- Transport http.RoundTripper
-}
-
-// RoundTrip rewrites the request scheme to http and calls through to the
-// composed RoundTripper or if it is nil, to the http.DefaultTransport.
-func (t *RewriteTransport) RoundTrip(req *http.Request) (*http.Response, error) {
- req.URL.Scheme = "http"
- if t.Transport == nil {
- return http.DefaultTransport.RoundTrip(req)
- }
- return t.Transport.RoundTrip(req)
-}
-
-func assertMethod(t *testing.T, expectedMethod string, req *http.Request) {
- assert.Equal(t, expectedMethod, req.Method)
-}
-
-// assertQuery tests that the Request has the expected url query key/val pairs
-func assertQuery(t *testing.T, expected map[string]string, req *http.Request) {
- queryValues := req.URL.Query()
- expectedValues := url.Values{}
- for key, value := range expected {
- expectedValues.Add(key, value)
- }
- assert.Equal(t, expectedValues, queryValues)
-}
-
-// assertPostForm tests that the Request has the expected key values pairs url
-// encoded in its Body
-func assertPostForm(t *testing.T, expected map[string]string, req *http.Request) {
- req.ParseForm() // parses request Body to put url.Values in r.Form/r.PostForm
- expectedValues := url.Values{}
- for key, value := range expected {
- expectedValues.Add(key, value)
- }
- assert.Equal(t, expectedValues, req.Form)
-}
-
-// assertDone asserts that the empty struct channel is closed before the given
-// timeout elapses.
-func assertDone(t *testing.T, ch <-chan struct{}, timeout time.Duration) {
- select {
- case <-ch:
- _, more := <-ch
- assert.False(t, more)
- case <-time.After(timeout):
- t.Errorf("expected channel to be closed within timeout %v", timeout)
- }
-}
-
-// assertClosed asserts that the channel is closed before the given timeout
-// elapses.
-func assertClosed(t *testing.T, ch <-chan interface{}, timeout time.Duration) {
- select {
- case <-ch:
- _, more := <-ch
- assert.False(t, more)
- case <-time.After(timeout):
- t.Errorf("expected channel to be closed within timeout %v", timeout)
- }
-}
diff --git a/vendor/src/github.com/dghubble/go-twitter/twitter/users.go b/vendor/src/github.com/dghubble/go-twitter/twitter/users.go
deleted file mode 100644
index 391f364..0000000
--- a/vendor/src/github.com/dghubble/go-twitter/twitter/users.go
+++ /dev/null
@@ -1,122 +0,0 @@
-package twitter
-
-import (
- "net/http"
-
- "github.com/dghubble/sling"
-)
-
-// User represents a Twitter User.
-// https://dev.twitter.com/overview/api/users
-type User struct {
- ContributorsEnabled bool `json:"contributors_enabled"`
- CreatedAt string `json:"created_at"`
- DefaultProfile bool `json:"default_profile"`
- DefaultProfileImage bool `json:"default_profile_image"`
- Description string `json:"description"`
- Email string `json:"email"`
- Entities *UserEntities `json:"entities"`
- FavouritesCount int `json:"favourites_count"`
- FollowRequestSent bool `json:"follow_request_sent"`
- Following bool `json:"following"`
- FollowersCount int `json:"followers_count"`
- FriendsCount int `json:"friends_count"`
- GeoEnabled bool `json:"geo_enabled"`
- ID int64 `json:"id"`
- IDStr string `json:"id_str"`
- IsTranslator bool `json:"id_translator"`
- Lang string `json:"lang"`
- ListedCount int `json:"listed_count"`
- Location string `json:"location"`
- Name string `json:"name"`
- Notifications bool `json:"notifications"`
- ProfileBackgroundColor string `json:"profile_background_color"`
- ProfileBackgroundImageURL string `json:"profile_background_image_url"`
- ProfileBackgroundImageURLHttps string `json:"profile_background_image_url_https"`
- ProfileBackgroundTile bool `json:"profile_background_tile"`
- ProfileBannerURL string `json:"profile_banner_url"`
- ProfileImageURL string `json:"profile_image_url"`
- ProfileImageURLHttps string `json:"profile_image_url_https"`
- ProfileLinkColor string `json:"profile_link_color"`
- ProfileSidebarBorderColor string `json:"profile_sidebar_border_color"`
- ProfileSidebarFillColor string `json:"profile_sidebar_fill_color"`
- ProfileTextColor string `json:"profile_text_color"`
- ProfileUseBackgroundImage bool `json:"profile_use_background_image"`
- Protected bool `json:"protected"`
- ScreenName string `json:"screen_name"`
- ShowAllInlineMedia bool `json:"show_all_inline_media"`
- Status *Tweet `json:"status"`
- StatusesCount int `json:"statuses_count"`
- Timezone string `json:"time_zone"`
- URL string `json:"url"`
- UtcOffset int `json:"utc_offset"`
- Verified bool `json:"verified"`
- WithheldInCountries string `json:"withheld_in_countries"`
- WithholdScope string `json:"withheld_scope"`
-}
-
-// UserService provides methods for accessing Twitter user API endpoints.
-type UserService struct {
- sling *sling.Sling
-}
-
-// newUserService returns a new UserService.
-func newUserService(sling *sling.Sling) *UserService {
- return &UserService{
- sling: sling.Path("users/"),
- }
-}
-
-// UserShowParams are the parameters for UserService.Show.
-type UserShowParams struct {
- UserID int64 `url:"user_id,omitempty"`
- ScreenName string `url:"screen_name,omitempty"`
- IncludeEntities *bool `url:"include_entities,omitempty"` // whether 'status' should include entities
-}
-
-// Show returns the requested User.
-// https://dev.twitter.com/rest/reference/get/users/show
-func (s *UserService) Show(params *UserShowParams) (*User, *http.Response, error) {
- user := new(User)
- apiError := new(APIError)
- resp, err := s.sling.New().Get("show.json").QueryStruct(params).Receive(user, apiError)
- return user, resp, relevantError(err, *apiError)
-}
-
-// UserLookupParams are the parameters for UserService.Lookup.
-type UserLookupParams struct {
- UserID []int64 `url:"user_id,omitempty,comma"`
- ScreenName []string `url:"screen_name,omitempty,comma"`
- IncludeEntities *bool `url:"include_entities,omitempty"` // whether 'status' should include entities
-}
-
-// Lookup returns the requested Users as a slice.
-// https://dev.twitter.com/rest/reference/get/users/lookup
-func (s *UserService) Lookup(params *UserLookupParams) ([]User, *http.Response, error) {
- users := new([]User)
- apiError := new(APIError)
- resp, err := s.sling.New().Get("lookup.json").QueryStruct(params).Receive(users, apiError)
- return *users, resp, relevantError(err, *apiError)
-}
-
-// UserSearchParams are the parameters for UserService.Search.
-type UserSearchParams struct {
- Query string `url:"q,omitempty"`
- Page int `url:"page,omitempty"` // 1-based page number
- Count int `url:"count,omitempty"`
- IncludeEntities *bool `url:"include_entities,omitempty"` // whether 'status' should include entities
-}
-
-// Search queries public user accounts.
-// Requires a user auth context.
-// https://dev.twitter.com/rest/reference/get/users/search
-func (s *UserService) Search(query string, params *UserSearchParams) ([]User, *http.Response, error) {
- if params == nil {
- params = &UserSearchParams{}
- }
- params.Query = query
- users := new([]User)
- apiError := new(APIError)
- resp, err := s.sling.New().Get("search.json").QueryStruct(params).Receive(users, apiError)
- return *users, resp, relevantError(err, *apiError)
-}
diff --git a/vendor/src/github.com/dghubble/go-twitter/twitter/users_test.go b/vendor/src/github.com/dghubble/go-twitter/twitter/users_test.go
deleted file mode 100644
index 54e6c5f..0000000
--- a/vendor/src/github.com/dghubble/go-twitter/twitter/users_test.go
+++ /dev/null
@@ -1,92 +0,0 @@
-package twitter
-
-import (
- "fmt"
- "net/http"
- "testing"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestUserService_Show(t *testing.T) {
- httpClient, mux, server := testServer()
- defer server.Close()
-
- mux.HandleFunc("/1.1/users/show.json", func(w http.ResponseWriter, r *http.Request) {
- assertMethod(t, "GET", r)
- assertQuery(t, map[string]string{"screen_name": "xkcdComic"}, r)
- w.Header().Set("Content-Type", "application/json")
- fmt.Fprintf(w, `{"name": "XKCD Comic", "favourites_count": 2}`)
- })
-
- client := NewClient(httpClient)
- user, _, err := client.Users.Show(&UserShowParams{ScreenName: "xkcdComic"})
- expected := &User{Name: "XKCD Comic", FavouritesCount: 2}
- assert.Nil(t, err)
- assert.Equal(t, expected, user)
-}
-
-func TestUserService_LookupWithIds(t *testing.T) {
- httpClient, mux, server := testServer()
- defer server.Close()
-
- mux.HandleFunc("/1.1/users/lookup.json", func(w http.ResponseWriter, r *http.Request) {
- assertMethod(t, "GET", r)
- assertQuery(t, map[string]string{"user_id": "113419064,623265148"}, r)
- w.Header().Set("Content-Type", "application/json")
- fmt.Fprintf(w, `[{"screen_name": "golang"}, {"screen_name": "dghubble"}]`)
- })
-
- client := NewClient(httpClient)
- users, _, err := client.Users.Lookup(&UserLookupParams{UserID: []int64{113419064, 623265148}})
- expected := []User{User{ScreenName: "golang"}, User{ScreenName: "dghubble"}}
- assert.Nil(t, err)
- assert.Equal(t, expected, users)
-}
-
-func TestUserService_LookupWithScreenNames(t *testing.T) {
- httpClient, mux, server := testServer()
- defer server.Close()
-
- mux.HandleFunc("/1.1/users/lookup.json", func(w http.ResponseWriter, r *http.Request) {
- assertMethod(t, "GET", r)
- assertQuery(t, map[string]string{"screen_name": "foo,bar"}, r)
- w.Header().Set("Content-Type", "application/json")
- fmt.Fprintf(w, `[{"name": "Foo"}, {"name": "Bar"}]`)
- })
-
- client := NewClient(httpClient)
- users, _, err := client.Users.Lookup(&UserLookupParams{ScreenName: []string{"foo", "bar"}})
- expected := []User{User{Name: "Foo"}, User{Name: "Bar"}}
- assert.Nil(t, err)
- assert.Equal(t, expected, users)
-}
-
-func TestUserService_Search(t *testing.T) {
- httpClient, mux, server := testServer()
- defer server.Close()
-
- mux.HandleFunc("/1.1/users/search.json", func(w http.ResponseWriter, r *http.Request) {
- assertMethod(t, "GET", r)
- assertQuery(t, map[string]string{"count": "11", "q": "news"}, r)
- w.Header().Set("Content-Type", "application/json")
- fmt.Fprintf(w, `[{"name": "BBC"}, {"name": "BBC Breaking News"}]`)
- })
-
- client := NewClient(httpClient)
- users, _, err := client.Users.Search("news", &UserSearchParams{Query: "override me", Count: 11})
- expected := []User{User{Name: "BBC"}, User{Name: "BBC Breaking News"}}
- assert.Nil(t, err)
- assert.Equal(t, expected, users)
-}
-
-func TestUserService_SearchHandlesNilParams(t *testing.T) {
- httpClient, mux, server := testServer()
- defer server.Close()
-
- mux.HandleFunc("/1.1/users/search.json", func(w http.ResponseWriter, r *http.Request) {
- assertQuery(t, map[string]string{"q": "news"}, r)
- })
- client := NewClient(httpClient)
- client.Users.Search("news", nil)
-}
diff --git a/vendor/src/github.com/dghubble/oauth1/CHANGES.md b/vendor/src/github.com/dghubble/oauth1/CHANGES.md
deleted file mode 100644
index 975c612..0000000
--- a/vendor/src/github.com/dghubble/oauth1/CHANGES.md
+++ /dev/null
@@ -1,30 +0,0 @@
-# OAuth1 Changelog
-
-## v0.4.0 (2016-04-20)
-
-* Add a Signer field to the Config to allow custom Signer implementations.
-* Use the HMACSigner by default. This provides the same signing behavior as in previous versions (HMAC-SHA1).
-* Add an RSASigner for "RSA-SHA1" OAuth1 Providers.
-* Add missing Authorization Header quotes around OAuth parameter values. Many providers allowed these quotes to be missing.
-* Change `Signer` to be a signer interface.
-* Remove the old Signer methods `SetAccessTokenAuthHeader`, `SetRequestAuthHeader`, and `SetRequestTokenAuthHeader`.
-
-## v0.3.0 (2015-09-13)
-
-* Added `NoContext` which may be used in most cases.
-* Allowed Transport Base http.RoundTripper to be set through a ctx.
-* Changed `NewClient` to require a context.Context.
-* Changed `Config.Client` to require a context.Context.
-
-## v.0.2.0 (2015-08-30)
-
-* Improved OAuth 1 spec compliance and test coverage.
-* Added `func StaticTokenSource(*Token) TokenSource`
-* Added `ParseAuthorizationCallback` function. Removed `Config.HandleAuthorizationCallback` method.
-* Changed `Config` method signatures to allow an interface to be defined for the OAuth1 authorization flow. Gives users of this package (and downstream packages) the freedom to use other implementations if they wish.
-* Removed `RequestToken` in favor of passing token and secret value strings.
-* Removed `ReuseTokenSource` struct, it was effectively a static source. Replaced by `StaticTokenSource`.
-
-## v0.1.0 (2015-04-26)
-
-* Initial OAuth1 support for obtaining authorization and making authorized requests.
\ No newline at end of file
diff --git a/vendor/src/github.com/dghubble/oauth1/LICENSE b/vendor/src/github.com/dghubble/oauth1/LICENSE
deleted file mode 100644
index 2718840..0000000
--- a/vendor/src/github.com/dghubble/oauth1/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2015 Dalton Hubble
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
\ No newline at end of file
diff --git a/vendor/src/github.com/dghubble/oauth1/README.md b/vendor/src/github.com/dghubble/oauth1/README.md
deleted file mode 100644
index 36a1d21..0000000
--- a/vendor/src/github.com/dghubble/oauth1/README.md
+++ /dev/null
@@ -1,125 +0,0 @@
-
-# OAuth1 [![Build Status](https://travis-ci.org/dghubble/oauth1.png)](https://travis-ci.org/dghubble/oauth1) [![GoDoc](http://godoc.org/github.com/dghubble/oauth1?status.png)](http://godoc.org/github.com/dghubble/oauth1)
-
-
-OAauth1 is a Go implementation of the [OAuth 1 spec](https://tools.ietf.org/html/rfc5849).
-
-It allows end-users to authorize a client (consumer) to access protected resources on his/her behalf and to make signed and authorized requests.
-
-Package `oauth1` takes design cues from [golang.org/x/oauth2](https://godoc.org/golang.org/x/oauth2), to provide an analogous API and an `http.Client` with a Transport which signs/authorizes requests.
-
-## Install
-
- go get github.com/dghubble/oauth1
-
-## Docs
-
-Read [GoDoc](https://godoc.org/github.com/dghubble/oauth1)
-
-## Usage
-
-Package `oauth1` implements the OAuth1 authorization flow and provides an `http.Client` which can sign and authorize OAuth1 requests.
-
-To implement "Login with X", use the [gologin](https://github.com/dghubble/gologin) packages which provide login handlers for OAuth1 and OAuth2 providers.
-
-To call the Twitter, Digits, or Tumblr OAuth1 APIs, use the higher level Go API clients.
-
-* [Twitter](https://github.com/dghubble/go-twitter)
-* [Digits](https://github.com/dghubble/go-digits)
-* [Tumblr](https://github.com/benfb/go-tumblr)
-
-### Authorization Flow
-
-Perform the OAuth 1 authorization flow to ask a user to grant an application access to his/her resources via an access token.
-
-```go
-import (
- "github.com/dghubble/oauth1"
- "github.com/dghubble/oauth1/twitter""
-)
-...
-
-config := oauth1.Config{
- ConsumerKey: "consumerKey",
- ConsumerSecret: "consumerSecret",
- CallbackURL: "http://mysite.com/oauth/twitter/callback",
- Endpoint: twitter.AuthorizeEndpoint,
-}
-```
-
-1. When a user performs an action (e.g. "Login with X" button calls "/login" route) get an OAuth1 request token (temporary credentials).
-
- ```go
- requestToken, requestSecret, err = config.RequestToken()
- // handle err
- ```
-
-2. Obtain authorization from the user by redirecting them to the OAuth1 provider's authorization URL to grant the application access.
-
- ```go
- authorizationURL, err := config.AuthorizationURL(requestToken)
- // handle err
- http.Redirect(w, req, authorizationURL.String(), htt.StatusFound)
- ```
-
- Receive the callback from the OAuth1 provider in a handler.
-
- ```go
- requestToken, verifier, err := oauth1.ParseAuthorizationCallback(req)
- // handle err
- ```
-
-3. Acquire the access token (token credentials) which can later be used to make requests on behalf of the user.
-
- ```go
- accessToken, accessSecret, err := config.AccessToken(requestToken, requestSecret, verifier)
- // handle error
- token := NewToken(accessToken, accessSecret)
- ```
-
-Check the [examples](examples) to see this authorization flow in action from the command line, with Twitter PIN-based login and Tumblr login.
-
-### Authorized Requests
-
-Use an access `Token` to make authorized requests on behalf of a user.
-
-```go
-import (
- "github.com/dghubble/oauth1"
-)
-
-func main() {
- config := oauth1.NewConfig("consumerKey", "consumerSecret")
- token := oauth1.NewToken("token", "tokenSecret")
-
- // httpClient will automatically authorize http.Request's
- httpClient := config.Client(oauth1.NoContext, token)
-
- // example Twitter API request
- path := "https://api.twitter.com/1.1/statuses/home_timeline.json?count=2"
- resp, _ := httpClient.Get(path)
- defer resp.Body.Close()
- body, _ := ioutil.ReadAll(resp.Body)
- fmt.Printf("Raw Response Body:\n%v\n", string(body))
-}
-```
-
-Check the [examples](examples) to see Twitter and Tumblr requests in action.
-
-### Concepts
-
-An `Endpoint` groups an OAuth provider's token and authorization URL endpoints.Endpoints for common providers are provided in subpackages.
-
-A `Config` stores a consumer application's consumer key and secret, the registered callback URL, and the `Endpoint` to which the consumer is registered. It provides OAuth1 authorization flow methods.
-
-An OAuth1 `Token` is an access token which can be used to make signed requests on behalf of a user. See [Authorized Requests](#Authorized Requests) for details.
-
-If you've used the [golang.org/x/oauth2](https://godoc.org/golang.org/x/oauth2) package for OAuth2 before, this organization should be familiar.
-
-## Contributing
-
-See the [Contributing Guide](https://gist.github.com/dghubble/be682c123727f70bcfe7).
-
-## License
-
-[MIT License](LICENSE)
diff --git a/vendor/src/github.com/dghubble/oauth1/auther.go b/vendor/src/github.com/dghubble/oauth1/auther.go
deleted file mode 100644
index 2de98d0..0000000
--- a/vendor/src/github.com/dghubble/oauth1/auther.go
+++ /dev/null
@@ -1,265 +0,0 @@
-package oauth1
-
-import (
- "bytes"
- "crypto/rand"
- "encoding/base64"
- "fmt"
- "io/ioutil"
- "net/http"
- "net/url"
- "sort"
- "strconv"
- "strings"
- "time"
-)
-
-const (
- authorizationHeaderParam = "Authorization"
- authorizationPrefix = "OAuth " // trailing space is intentional
- oauthConsumerKeyParam = "oauth_consumer_key"
- oauthNonceParam = "oauth_nonce"
- oauthSignatureParam = "oauth_signature"
- oauthSignatureMethodParam = "oauth_signature_method"
- oauthTimestampParam = "oauth_timestamp"
- oauthTokenParam = "oauth_token"
- oauthVersionParam = "oauth_version"
- oauthCallbackParam = "oauth_callback"
- oauthVerifierParam = "oauth_verifier"
- defaultOauthVersion = "1.0"
- contentType = "Content-Type"
- formContentType = "application/x-www-form-urlencoded"
-)
-
-// clock provides a interface for current time providers. A Clock can be used
-// in place of calling time.Now() directly.
-type clock interface {
- Now() time.Time
-}
-
-// A noncer provides random nonce strings.
-type noncer interface {
- Nonce() string
-}
-
-// auther adds an "OAuth" Authorization header field to requests.
-type auther struct {
- config *Config
- clock clock
- noncer noncer
-}
-
-func newAuther(config *Config) *auther {
- return &auther{
- config: config,
- }
-}
-
-// setRequestTokenAuthHeader adds the OAuth1 header for the request token
-// request (temporary credential) according to RFC 5849 2.1.
-func (a *auther) setRequestTokenAuthHeader(req *http.Request) error {
- oauthParams := a.commonOAuthParams()
- oauthParams[oauthCallbackParam] = a.config.CallbackURL
- params, err := collectParameters(req, oauthParams)
- if err != nil {
- return err
- }
- signatureBase := signatureBase(req, params)
- signature, err := a.signer().Sign("", signatureBase)
- if err != nil {
- return err
- }
- oauthParams[oauthSignatureParam] = signature
- req.Header.Set(authorizationHeaderParam, authHeaderValue(oauthParams))
- return nil
-}
-
-// setAccessTokenAuthHeader sets the OAuth1 header for the access token request
-// (token credential) according to RFC 5849 2.3.
-func (a *auther) setAccessTokenAuthHeader(req *http.Request, requestToken, requestSecret, verifier string) error {
- oauthParams := a.commonOAuthParams()
- oauthParams[oauthTokenParam] = requestToken
- oauthParams[oauthVerifierParam] = verifier
- params, err := collectParameters(req, oauthParams)
- if err != nil {
- return err
- }
- signatureBase := signatureBase(req, params)
- signature, err := a.signer().Sign(requestSecret, signatureBase)
- if err != nil {
- return err
- }
- oauthParams[oauthSignatureParam] = signature
- req.Header.Set(authorizationHeaderParam, authHeaderValue(oauthParams))
- return nil
-}
-
-// setRequestAuthHeader sets the OAuth1 header for making authenticated
-// requests with an AccessToken (token credential) according to RFC 5849 3.1.
-func (a *auther) setRequestAuthHeader(req *http.Request, accessToken *Token) error {
- oauthParams := a.commonOAuthParams()
- oauthParams[oauthTokenParam] = accessToken.Token
- params, err := collectParameters(req, oauthParams)
- if err != nil {
- return err
- }
- signatureBase := signatureBase(req, params)
- signature, err := a.signer().Sign(accessToken.TokenSecret, signatureBase)
- if err != nil {
- return err
- }
- oauthParams[oauthSignatureParam] = signature
- req.Header.Set(authorizationHeaderParam, authHeaderValue(oauthParams))
- return nil
-}
-
-// commonOAuthParams returns a map of the common OAuth1 protocol parameters,
-// excluding the oauth_signature parameter.
-func (a *auther) commonOAuthParams() map[string]string {
- return map[string]string{
- oauthConsumerKeyParam: a.config.ConsumerKey,
- oauthSignatureMethodParam: a.signer().Name(),
- oauthTimestampParam: strconv.FormatInt(a.epoch(), 10),
- oauthNonceParam: a.nonce(),
- oauthVersionParam: defaultOauthVersion,
- }
-}
-
-// Returns a base64 encoded random 32 byte string.
-func (a *auther) nonce() string {
- if a.noncer != nil {
- return a.noncer.Nonce()
- }
- b := make([]byte, 32)
- rand.Read(b)
- return base64.StdEncoding.EncodeToString(b)
-}
-
-// Returns the Unix epoch seconds.
-func (a *auther) epoch() int64 {
- if a.clock != nil {
- return a.clock.Now().Unix()
- }
- return time.Now().Unix()
-}
-
-// Returns the Config's Signer or the default Signer.
-func (a *auther) signer() Signer {
- if a.config.Signer != nil {
- return a.config.Signer
- }
- return &HMACSigner{ConsumerSecret: a.config.ConsumerSecret}
-}
-
-// authHeaderValue formats OAuth parameters according to RFC 5849 3.5.1. OAuth
-// params are percent encoded, sorted by key (for testability), and joined by
-// "=" into pairs. Pairs are joined with a ", " comma separator into a header
-// string.
-// The given OAuth params should include the "oauth_signature" key.
-func authHeaderValue(oauthParams map[string]string) string {
- pairs := sortParameters(encodeParameters(oauthParams), `%s="%s"`)
- return authorizationPrefix + strings.Join(pairs, ", ")
-}
-
-// encodeParameters percent encodes parameter keys and values according to
-// RFC5849 3.6 and RFC3986 2.1 and returns a new map.
-func encodeParameters(params map[string]string) map[string]string {
- encoded := map[string]string{}
- for key, value := range params {
- encoded[PercentEncode(key)] = PercentEncode(value)
- }
- return encoded
-}
-
-// sortParameters sorts parameters by key and returns a slice of key/value
-// pairs formatted with the given format string (e.g. "%s=%s").
-func sortParameters(params map[string]string, format string) []string {
- // sort by key
- keys := make([]string, len(params))
- i := 0
- for key := range params {
- keys[i] = key
- i++
- }
- sort.Strings(keys)
- // parameter join
- pairs := make([]string, len(params))
- for i, key := range keys {
- pairs[i] = fmt.Sprintf(format, key, params[key])
- }
- return pairs
-}
-
-// collectParameters collects request parameters from the request query, OAuth
-// parameters (which should exclude oauth_signature), and the request body
-// provided the body is single part, form encoded, and the form content type
-// header is set. The returned map of collected parameter keys and values
-// follow RFC 5849 3.4.1.3, except duplicate parameters are not supported.
-func collectParameters(req *http.Request, oauthParams map[string]string) (map[string]string, error) {
- // add oauth, query, and body parameters into params
- params := map[string]string{}
- for key, value := range req.URL.Query() {
- // most backends do not accept duplicate query keys
- params[key] = value[0]
- }
- if req.Body != nil && req.Header.Get(contentType) == formContentType {
- // reads data to a []byte, draining req.Body
- b, err := ioutil.ReadAll(req.Body)
- if err != nil {
- return nil, err
- }
- values, err := url.ParseQuery(string(b))
- if err != nil {
- return nil, err
- }
- for key, value := range values {
- // not supporting params with duplicate keys
- params[key] = value[0]
- }
- // reinitialize Body with ReadCloser over the []byte
- req.Body = ioutil.NopCloser(bytes.NewReader(b))
- }
- for key, value := range oauthParams {
- params[key] = value
- }
- return params, nil
-}
-
-// signatureBase combines the uppercase request method, percent encoded base
-// string URI, and normalizes the request parameters int a parameter string.
-// Returns the OAuth1 signature base string according to RFC5849 3.4.1.
-func signatureBase(req *http.Request, params map[string]string) string {
- method := strings.ToUpper(req.Method)
- baseURL := baseURI(req)
- parameterString := normalizedParameterString(params)
- // signature base string constructed accoding to 3.4.1.1
- baseParts := []string{method, PercentEncode(baseURL), PercentEncode(parameterString)}
- return strings.Join(baseParts, "&")
-}
-
-// baseURI returns the base string URI of a request according to RFC 5849
-// 3.4.1.2. The scheme and host are lowercased, the port is dropped if it
-// is 80 or 443, and the path minus query parameters is included.
-func baseURI(req *http.Request) string {
- scheme := strings.ToLower(req.URL.Scheme)
- host := strings.ToLower(req.URL.Host)
- if hostPort := strings.Split(host, ":"); len(hostPort) == 2 && (hostPort[1] == "80" || hostPort[1] == "443") {
- host = hostPort[0]
- }
- // TODO: use req.URL.EscapedPath() once Go 1.5 is more generally adopted
- // For now, hacky workaround accomplishes the same internal escaping mode
- // escape(u.Path, encodePath) for proper compliance with the OAuth1 spec.
- path := req.URL.Path
- if path != "" {
- path = strings.Split(req.URL.RequestURI(), "?")[0]
- }
- return fmt.Sprintf("%v://%v%v", scheme, host, path)
-}
-
-// parameterString normalizes collected OAuth parameters (which should exclude
-// oauth_signature) into a parameter string as defined in RFC 5894 3.4.1.3.2.
-// The parameters are encoded, sorted by key, keys and values joined with "&",
-// and pairs joined with "=" (e.g. foo=bar&q=gopher).
-func normalizedParameterString(params map[string]string) string {
- return strings.Join(sortParameters(encodeParameters(params), "%s=%s"), "&")
-}
diff --git a/vendor/src/github.com/dghubble/oauth1/auther_test.go b/vendor/src/github.com/dghubble/oauth1/auther_test.go
deleted file mode 100644
index 4cd28a0..0000000
--- a/vendor/src/github.com/dghubble/oauth1/auther_test.go
+++ /dev/null
@@ -1,244 +0,0 @@
-package oauth1
-
-import (
- "net/http"
- "net/url"
- "strings"
- "testing"
- "time"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestCommonOAuthParams(t *testing.T) {
- config := &Config{ConsumerKey: "some_consumer_key"}
- auther := &auther{config, &fixedClock{time.Unix(50037133, 0)}, &fixedNoncer{"some_nonce"}}
- expectedParams := map[string]string{
- "oauth_consumer_key": "some_consumer_key",
- "oauth_signature_method": "HMAC-SHA1",
- "oauth_timestamp": "50037133",
- "oauth_nonce": "some_nonce",
- "oauth_version": "1.0",
- }
- assert.Equal(t, expectedParams, auther.commonOAuthParams())
-}
-
-func TestNonce(t *testing.T) {
- auther := &auther{}
- nonce := auther.nonce()
- // assert that 32 bytes (256 bites) become 44 bytes since a base64 byte
- // zeros the 2 high bits. 3 bytes convert to 4 base64 bytes, 40 base64 bytes
- // represent the first 30 of 32 bytes, = padding adds another 4 byte group.
- // base64 bytes = 4 * floor(bytes/3) + 4
- assert.Equal(t, 44, len([]byte(nonce)))
-}
-
-func TestEpoch(t *testing.T) {
- a := &auther{}
- // assert that a real time is used by default
- assert.InEpsilon(t, time.Now().Unix(), a.epoch(), 1)
- // assert that the fixed clock can be used for testing
- a = &auther{clock: &fixedClock{time.Unix(50037133, 0)}}
- assert.Equal(t, int64(50037133), a.epoch())
-}
-
-func TestSigner_Default(t *testing.T) {
- config := &Config{ConsumerSecret: "consumer_secret"}
- a := newAuther(config)
- // echo -n "hello world" | openssl dgst -sha1 -hmac "consumer_secret&token_secret" -binary | base64
- expectedSignature := "BE0uILOruKfSXd4UzYlLJDfOq08="
- // assert that the default signer produces the expected HMAC-SHA1 digest
- method := a.signer().Name()
- digest, err := a.signer().Sign("token_secret", "hello world")
- assert.Nil(t, err)
- assert.Equal(t, "HMAC-SHA1", method)
- assert.Equal(t, expectedSignature, digest)
-}
-
-type identitySigner struct{}
-
-func (s *identitySigner) Name() string {
- return "identity"
-}
-
-func (s *identitySigner) Sign(tokenSecret, message string) (string, error) {
- return message, nil
-}
-
-func TestSigner_Custom(t *testing.T) {
- config := &Config{
- ConsumerSecret: "consumer_secret",
- Signer: &identitySigner{},
- }
- a := newAuther(config)
- // assert that the custom signer is used
- method := a.signer().Name()
- digest, err := a.signer().Sign("secret", "hello world")
- assert.Nil(t, err)
- assert.Equal(t, "identity", method)
- assert.Equal(t, "hello world", digest)
-}
-
-func TestAuthHeaderValue(t *testing.T) {
- cases := []struct {
- params map[string]string
- authHeader string
- }{
- {map[string]string{}, "OAuth "},
- {map[string]string{"a": "b"}, `OAuth a="b"`},
- {map[string]string{"a": "b", "c": "d", "e": "f", "1": "2"}, `OAuth 1="2", a="b", c="d", e="f"`},
- {map[string]string{"/= +doencode": "/= +doencode"}, `OAuth %2F%3D%20%2Bdoencode="%2F%3D%20%2Bdoencode"`},
- {map[string]string{"-._~dontencode": "-._~dontencode"}, `OAuth -._~dontencode="-._~dontencode"`},
- }
- for _, c := range cases {
- assert.Equal(t, c.authHeader, authHeaderValue(c.params))
- }
-}
-
-func TestEncodeParameters(t *testing.T) {
- input := map[string]string{
- "a": "Dogs, Cats & Mice",
- "☃": "snowman",
- "ル": "ル",
- }
- expected := map[string]string{
- "a": "Dogs%2C%20Cats%20%26%20Mice",
- "%E2%98%83": "snowman",
- "%E3%83%AB": "%E3%83%AB",
- }
- assert.Equal(t, expected, encodeParameters(input))
-}
-
-func TestSortParameters(t *testing.T) {
- input := map[string]string{
- ".": "ape",
- "5.6": "bat",
- "rsa": "cat",
- "%20": "dog",
- "%E3%83%AB": "eel",
- "dup": "fox",
- //"dup": "fix", // duplicate keys not supported
- }
- expected := []string{
- "%20=dog",
- "%E3%83%AB=eel",
- ".=ape",
- "5.6=bat",
- "dup=fox",
- "rsa=cat",
- }
- assert.Equal(t, expected, sortParameters(input, "%s=%s"))
-}
-
-func TestCollectParameters(t *testing.T) {
- // example from RFC 5849 3.4.1.3.1
- oauthParams := map[string]string{
- "oauth_token": "kkk9d7dh3k39sjv7",
- "oauth_consumer_key": "9djdj82h48djs9d2",
- "oauth_signature_method": "HMAC-SHA1",
- "oauth_timestamp": "137131201",
- "oauth_nonce": "7d8f3e4a",
- }
- values := url.Values{}
- values.Add("c2", "")
- values.Add("plus", "2 q") // duplicate keys not supported, a3 -> plus
- req, err := http.NewRequest("POST", "/request?b5=%3D%253D&a3=a&c%40=&a2=r%20b", strings.NewReader(values.Encode()))
- assert.Nil(t, err)
- req.Header.Set(contentType, formContentType)
- params, err := collectParameters(req, oauthParams)
- // assert parameters were collected from oauthParams, the query, and form body
- expected := map[string]string{
- "b5": "=%3D",
- "a3": "a",
- "c@": "",
- "a2": "r b",
- "oauth_token": "kkk9d7dh3k39sjv7",
- "oauth_consumer_key": "9djdj82h48djs9d2",
- "oauth_signature_method": "HMAC-SHA1",
- "oauth_timestamp": "137131201",
- "oauth_nonce": "7d8f3e4a",
- "c2": "",
- "plus": "2 q",
- }
- assert.Nil(t, err)
- assert.Equal(t, expected, params)
- // RFC 5849 3.4.1.3.1 requires a {"a3"="2 q"} be form encoded to "a3=2+q" in
- // the application/x-www-form-urlencoded body. The parameter "2+q" should be
- // read as "2 q" and percent encoded to "2%20q".
- // In Go, data is form encoded by calling Encode on url.Values{} (URL
- // encoding) and decoded with url.ParseQuery to url.Values. So the encoding
- // of "2 q" to "2+q" and decoding back to "2 q" is handled and then params
- // are percent encoded.
- // http://golang.org/src/net/http/client.go#L496
- // http://golang.org/src/net/http/request.go#L837
-}
-
-func TestSignatureBase(t *testing.T) {
- reqA, err := http.NewRequest("get", "HTTPS://HELLO.IO?q=test", nil)
- assert.Nil(t, err)
- reqB, err := http.NewRequest("POST", "http://hello.io:8080", nil)
- assert.Nil(t, err)
- cases := []struct {
- req *http.Request
- params map[string]string
- signatureBase string
- }{
- {reqA, map[string]string{"a": "b", "c": "d"}, "GET&https%3A%2F%2Fhello.io&a%3Db%26c%3Dd"},
- {reqB, map[string]string{"a": "b"}, "POST&http%3A%2F%2Fhello.io%3A8080&a%3Db"},
- }
- // assert that method is uppercased, base uri rules applied, queries added, joined by &
- for _, c := range cases {
- base := signatureBase(c.req, c.params)
- assert.Equal(t, c.signatureBase, base)
- }
-}
-
-func TestBaseURI(t *testing.T) {
- reqA, err := http.NewRequest("GET", "HTTP://EXAMPLE.COM:80/r%20v/X?id=123", nil)
- assert.Nil(t, err)
- reqB, err := http.NewRequest("POST", "https://www.example.net:8080/?q=1", nil)
- assert.Nil(t, err)
- reqC, err := http.NewRequest("POST", "https://example.com:443", nil)
- cases := []struct {
- req *http.Request
- baseURI string
- }{
- {reqA, "http://example.com/r%20v/X"},
- {reqB, "https://www.example.net:8080/"},
- {reqC, "https://example.com"},
- }
- for _, c := range cases {
- baseURI := baseURI(c.req)
- assert.Equal(t, c.baseURI, baseURI)
- }
-}
-
-func TestNormalizedParameterString(t *testing.T) {
- simple := map[string]string{
- "a": "b & c",
- "☃": "snowman",
- }
- rfcExample := map[string]string{
- "b5": "=%3D",
- "a3": "a",
- "c@": "",
- "a2": "r b",
- "oauth_token": "kkk9d7dh3k39sjv7",
- "oauth_consumer_key": "9djdj82h48djs9d2",
- "oauth_signature_method": "HMAC-SHA1",
- "oauth_timestamp": "137131201",
- "oauth_nonce": "7d8f3e4a",
- "c2": "",
- "plus": "2 q",
- }
- cases := []struct {
- params map[string]string
- parameterStr string
- }{
- {simple, "%E2%98%83=snowman&a=b%20%26%20c"},
- {rfcExample, "a2=r%20b&a3=a&b5=%3D%253D&c%40=&c2=&oauth_consumer_key=9djdj82h48djs9d2&oauth_nonce=7d8f3e4a&oauth_signature_method=HMAC-SHA1&oauth_timestamp=137131201&oauth_token=kkk9d7dh3k39sjv7&plus=2%20q"},
- }
- for _, c := range cases {
- assert.Equal(t, c.parameterStr, normalizedParameterString(c.params))
- }
-}
diff --git a/vendor/src/github.com/dghubble/oauth1/config.go b/vendor/src/github.com/dghubble/oauth1/config.go
deleted file mode 100644
index 0f36d87..0000000
--- a/vendor/src/github.com/dghubble/oauth1/config.go
+++ /dev/null
@@ -1,165 +0,0 @@
-package oauth1
-
-import (
- "errors"
- "io/ioutil"
- "net/http"
- "net/url"
-
- "golang.org/x/net/context"
-)
-
-const (
- oauthTokenSecretParam = "oauth_token_secret"
- oauthCallbackConfirmedParam = "oauth_callback_confirmed"
-)
-
-// Config represents an OAuth1 consumer's (client's) key and secret, the
-// callback URL, and the provider Endpoint to which the consumer corresponds.
-type Config struct {
- // Consumer Key (Client Identifier)
- ConsumerKey string
- // Consumer Secret (Client Shared-Secret)
- ConsumerSecret string
- // Callback URL
- CallbackURL string
- // Provider Endpoint specifying OAuth1 endpoint URLs
- Endpoint Endpoint
- // OAuth1 Signer (defaults to HMAC-SHA1)
- Signer Signer
-}
-
-// NewConfig returns a new Config with the given consumer key and secret.
-func NewConfig(consumerKey, consumerSecret string) *Config {
- return &Config{
- ConsumerKey: consumerKey,
- ConsumerSecret: consumerSecret,
- }
-}
-
-// Client returns an HTTP client which uses the provided ctx and access Token.
-func (c *Config) Client(ctx context.Context, t *Token) *http.Client {
- return NewClient(ctx, c, t)
-}
-
-// NewClient returns a new http Client which signs requests via OAuth1.
-func NewClient(ctx context.Context, config *Config, token *Token) *http.Client {
- transport := &Transport{
- Base: contextTransport(ctx),
- source: StaticTokenSource(token),
- auther: newAuther(config),
- }
- return &http.Client{Transport: transport}
-}
-
-// RequestToken obtains a Request token and secret (temporary credential) by
-// POSTing a request (with oauth_callback in the auth header) to the Endpoint
-// RequestTokenURL. The response body form is validated to ensure
-// oauth_callback_confirmed is true. Returns the request token and secret
-// (temporary credentials).
-// See RFC 5849 2.1 Temporary Credentials.
-func (c *Config) RequestToken() (requestToken, requestSecret string, err error) {
- req, err := http.NewRequest("POST", c.Endpoint.RequestTokenURL, nil)
- if err != nil {
- return "", "", err
- }
- err = newAuther(c).setRequestTokenAuthHeader(req)
- if err != nil {
- return "", "", err
- }
- resp, err := http.DefaultClient.Do(req)
- if err != nil {
- return "", "", err
- }
- // when err is nil, resp contains a non-nil resp.Body which must be closed
- defer resp.Body.Close()
- body, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- return "", "", err
- }
- // ParseQuery to decode URL-encoded application/x-www-form-urlencoded body
- values, err := url.ParseQuery(string(body))
- if err != nil {
- return "", "", err
- }
- if values.Get(oauthCallbackConfirmedParam) != "true" {
- return "", "", errors.New("oauth1: oauth_callback_confirmed was not true")
- }
- requestToken = values.Get(oauthTokenParam)
- requestSecret = values.Get(oauthTokenSecretParam)
- if requestToken == "" || requestSecret == "" {
- return "", "", errors.New("oauth1: Response missing oauth_token or oauth_token_secret")
- }
- return requestToken, requestSecret, nil
-}
-
-// AuthorizationURL accepts a request token and returns the *url.URL to the
-// Endpoint's authorization page that asks the user (resource owner) for to
-// authorize the consumer to act on his/her/its behalf.
-// See RFC 5849 2.2 Resource Owner Authorization.
-func (c *Config) AuthorizationURL(requestToken string) (*url.URL, error) {
- authorizationURL, err := url.Parse(c.Endpoint.AuthorizeURL)
- if err != nil {
- return nil, err
- }
- values := authorizationURL.Query()
- values.Add(oauthTokenParam, requestToken)
- authorizationURL.RawQuery = values.Encode()
- return authorizationURL, nil
-}
-
-// ParseAuthorizationCallback parses an OAuth1 authorization callback request
-// from a provider server. The oauth_token and oauth_verifier parameters are
-// parsed to return the request token from earlier in the flow and the
-// verifier string.
-// See RFC 5849 2.2 Resource Owner Authorization.
-func ParseAuthorizationCallback(req *http.Request) (requestToken, verifier string, err error) {
- // parse the raw query from the URL into req.Form
- err = req.ParseForm()
- if err != nil {
- return "", "", err
- }
- requestToken = req.Form.Get(oauthTokenParam)
- verifier = req.Form.Get(oauthVerifierParam)
- if requestToken == "" || verifier == "" {
- return "", "", errors.New("oauth1: Request missing oauth_token or oauth_verifier")
- }
- return requestToken, verifier, nil
-}
-
-// AccessToken obtains an access token (token credential) by POSTing a
-// request (with oauth_token and oauth_verifier in the auth header) to the
-// Endpoint AccessTokenURL. Returns the access token and secret (token
-// credentials).
-// See RFC 5849 2.3 Token Credentials.
-func (c *Config) AccessToken(requestToken, requestSecret, verifier string) (accessToken, accessSecret string, err error) {
- req, err := http.NewRequest("POST", c.Endpoint.AccessTokenURL, nil)
- if err != nil {
- return "", "", err
- }
- err = newAuther(c).setAccessTokenAuthHeader(req, requestToken, requestSecret, verifier)
- if err != nil {
- return "", "", err
- }
- resp, err := http.DefaultClient.Do(req)
- if err != nil {
- return "", "", err
- }
- // when err is nil, resp contains a non-nil resp.Body which must be closed
- defer resp.Body.Close()
- body, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- return "", "", err
- }
- // ParseQuery to decode URL-encoded application/x-www-form-urlencoded body
- values, err := url.ParseQuery(string(body))
- if err != nil {
- return "", "", err
- }
- accessToken = values.Get(oauthTokenParam)
- accessSecret = values.Get(oauthTokenSecretParam)
- if accessToken == "" || accessSecret == "" {
- return "", "", errors.New("oauth1: Response missing oauth_token or oauth_token_secret")
- }
- return accessToken, accessSecret, nil
-}
diff --git a/vendor/src/github.com/dghubble/oauth1/config_test.go b/vendor/src/github.com/dghubble/oauth1/config_test.go
deleted file mode 100644
index 8442c89..0000000
--- a/vendor/src/github.com/dghubble/oauth1/config_test.go
+++ /dev/null
@@ -1,342 +0,0 @@
-package oauth1
-
-import (
- "net/http"
- "net/http/httptest"
- "net/url"
- "testing"
-
- "github.com/stretchr/testify/assert"
- "golang.org/x/net/context"
-)
-
-const expectedVerifier = "some_verifier"
-
-func TestNewConfig(t *testing.T) {
- expectedConsumerKey := "consumer_key"
- expectedConsumerSecret := "consumer_secret"
- config := NewConfig(expectedConsumerKey, expectedConsumerSecret)
- assert.Equal(t, expectedConsumerKey, config.ConsumerKey)
- assert.Equal(t, expectedConsumerSecret, config.ConsumerSecret)
-}
-
-func TestNewClient(t *testing.T) {
- expectedToken := "access_token"
- expectedConsumerKey := "consumer_key"
- config := NewConfig(expectedConsumerKey, "consumer_secret")
- token := NewToken(expectedToken, "access_secret")
- client := config.Client(NoContext, token)
-
- server := newMockServer(func(w http.ResponseWriter, req *http.Request) {
- assert.Equal(t, "GET", req.Method)
- params := parseOAuthParamsOrFail(t, req.Header.Get(authorizationHeaderParam))
- assert.Equal(t, expectedToken, params[oauthTokenParam])
- assert.Equal(t, expectedConsumerKey, params[oauthConsumerKeyParam])
- })
- defer server.Close()
- client.Get(server.URL)
-}
-
-func TestNewClient_DefaultTransport(t *testing.T) {
- client := NewClient(NoContext, NewConfig("t", "s"), NewToken("t", "s"))
- // assert that the client uses the DefaultTransport
- transport, ok := client.Transport.(*Transport)
- assert.True(t, ok)
- assert.Equal(t, http.DefaultTransport, transport.base())
-}
-
-func TestNewClient_ContextClientTransport(t *testing.T) {
- baseTransport := &http.Transport{}
- baseClient := &http.Client{Transport: baseTransport}
- ctx := context.WithValue(NoContext, HTTPClient, baseClient)
- client := NewClient(ctx, NewConfig("t", "s"), NewToken("t", "s"))
- // assert that the client uses the ctx client's Transport as its base RoundTripper
- transport, ok := client.Transport.(*Transport)
- assert.True(t, ok)
- assert.Equal(t, baseTransport, transport.base())
-}
-
-// newRequestTokenServer returns a new httptest.Server for an OAuth1 provider
-// request token endpoint.
-func newRequestTokenServer(t *testing.T, data url.Values) *httptest.Server {
- return newMockServer(func(w http.ResponseWriter, req *http.Request) {
- assert.Equal(t, "POST", req.Method)
- assert.NotEmpty(t, req.Header.Get("Authorization"))
- w.Header().Set(contentType, formContentType)
- w.Write([]byte(data.Encode()))
- })
-}
-
-// newAccessTokenServer returns a new httptest.Server for an OAuth1 provider
-// access token endpoint.
-func newAccessTokenServer(t *testing.T, data url.Values) *httptest.Server {
- return newMockServer(func(w http.ResponseWriter, req *http.Request) {
- assert.Equal(t, "POST", req.Method)
- assert.NotEmpty(t, req.Header.Get("Authorization"))
- params := parseOAuthParamsOrFail(t, req.Header.Get(authorizationHeaderParam))
- assert.Equal(t, expectedVerifier, params[oauthVerifierParam])
- w.Header().Set(contentType, formContentType)
- w.Write([]byte(data.Encode()))
- })
-}
-
-// newUnparseableBodyServer returns a new httptest.Server which writes
-// responses with bodies that error when parsed by url.ParseQuery.
-func newUnparseableBodyServer() *httptest.Server {
- return newMockServer(func(w http.ResponseWriter, req *http.Request) {
- w.Header().Set(contentType, formContentType)
- // url.ParseQuery will error, https://golang.org/src/net/url/url_test.go#L1107
- w.Write([]byte("%gh&%ij"))
- })
-}
-
-func TestConfigRequestToken(t *testing.T) {
- expectedToken := "reqest_token"
- expectedSecret := "request_secret"
- data := url.Values{}
- data.Add("oauth_token", expectedToken)
- data.Add("oauth_token_secret", expectedSecret)
- data.Add("oauth_callback_confirmed", "true")
- server := newRequestTokenServer(t, data)
- defer server.Close()
-
- config := &Config{
- Endpoint: Endpoint{
- RequestTokenURL: server.URL,
- },
- }
- requestToken, requestSecret, err := config.RequestToken()
- assert.Nil(t, err)
- assert.Equal(t, expectedToken, requestToken)
- assert.Equal(t, expectedSecret, requestSecret)
-}
-
-func TestConfigRequestToken_InvalidRequestTokenURL(t *testing.T) {
- config := &Config{
- Endpoint: Endpoint{
- RequestTokenURL: "http://wrong.com/oauth/request_token",
- },
- }
- requestToken, requestSecret, err := config.RequestToken()
- assert.NotNil(t, err)
- assert.Equal(t, "", requestToken)
- assert.Equal(t, "", requestSecret)
-}
-
-func TestConfigRequestToken_CallbackNotConfirmed(t *testing.T) {
- data := url.Values{}
- data.Add("oauth_callback_confirmed", "false")
- server := newRequestTokenServer(t, data)
- defer server.Close()
-
- config := &Config{
- Endpoint: Endpoint{
- RequestTokenURL: server.URL,
- },
- }
- requestToken, requestSecret, err := config.RequestToken()
- if assert.Error(t, err) {
- assert.Equal(t, "oauth1: oauth_callback_confirmed was not true", err.Error())
- }
- assert.Equal(t, "", requestToken)
- assert.Equal(t, "", requestSecret)
-}
-
-func TestConfigRequestToken_CannotParseBody(t *testing.T) {
- server := newUnparseableBodyServer()
- defer server.Close()
-
- config := &Config{
- Endpoint: Endpoint{
- RequestTokenURL: server.URL,
- },
- }
- requestToken, requestSecret, err := config.RequestToken()
- if assert.Error(t, err) {
- assert.Contains(t, err.Error(), "invalid URL escape")
- }
- assert.Equal(t, "", requestToken)
- assert.Equal(t, "", requestSecret)
-}
-
-func TestConfigRequestToken_MissingTokenOrSecret(t *testing.T) {
- data := url.Values{}
- data.Add("oauth_token", "any_token")
- data.Add("oauth_callback_confirmed", "true")
- server := newRequestTokenServer(t, data)
- defer server.Close()
-
- config := &Config{
- Endpoint: Endpoint{
- RequestTokenURL: server.URL,
- },
- }
- requestToken, requestSecret, err := config.RequestToken()
- if assert.Error(t, err) {
- assert.Equal(t, "oauth1: Response missing oauth_token or oauth_token_secret", err.Error())
- }
- assert.Equal(t, "", requestToken)
- assert.Equal(t, "", requestSecret)
-}
-
-func TestAuthorizationURL(t *testing.T) {
- expectedURL := "https://api.example.com/oauth/authorize?oauth_token=a%2Frequest_token"
- config := &Config{
- Endpoint: Endpoint{
- AuthorizeURL: "https://api.example.com/oauth/authorize",
- },
- }
- url, err := config.AuthorizationURL("a/request_token")
- assert.Nil(t, err)
- if assert.NotNil(t, url) {
- assert.Equal(t, expectedURL, url.String())
- }
-}
-
-func TestAuthorizationURL_CannotParseAuthorizeURL(t *testing.T) {
- config := &Config{
- Endpoint: Endpoint{
- AuthorizeURL: "%gh&%ij",
- },
- }
- url, err := config.AuthorizationURL("any_request_token")
- assert.Nil(t, url)
- if assert.Error(t, err) {
- assert.Contains(t, err.Error(), "parse")
- assert.Contains(t, err.Error(), "invalid URL")
- }
-}
-
-func TestConfigAccessToken(t *testing.T) {
- expectedToken := "access_token"
- expectedSecret := "access_secret"
- data := url.Values{}
- data.Add("oauth_token", expectedToken)
- data.Add("oauth_token_secret", expectedSecret)
- server := newAccessTokenServer(t, data)
- defer server.Close()
-
- config := &Config{
- Endpoint: Endpoint{
- AccessTokenURL: server.URL,
- },
- }
- accessToken, accessSecret, err := config.AccessToken("request_token", "request_secret", expectedVerifier)
- assert.Nil(t, err)
- assert.Equal(t, expectedToken, accessToken)
- assert.Equal(t, expectedSecret, accessSecret)
-}
-
-func TestConfigAccessToken_InvalidAccessTokenURL(t *testing.T) {
- config := &Config{
- Endpoint: Endpoint{
- AccessTokenURL: "http://wrong.com/oauth/access_token",
- },
- }
- accessToken, accessSecret, err := config.AccessToken("any_token", "any_secret", "any_verifier")
- assert.NotNil(t, err)
- assert.Equal(t, "", accessToken)
- assert.Equal(t, "", accessSecret)
-}
-
-func TestConfigAccessToken_CannotParseBody(t *testing.T) {
- server := newUnparseableBodyServer()
- defer server.Close()
-
- config := &Config{
- Endpoint: Endpoint{
- AccessTokenURL: server.URL,
- },
- }
- accessToken, accessSecret, err := config.AccessToken("any_token", "any_secret", "any_verifier")
- if assert.Error(t, err) {
- assert.Contains(t, err.Error(), "invalid URL escape")
- }
- assert.Equal(t, "", accessToken)
- assert.Equal(t, "", accessSecret)
-}
-
-func TestConfigAccessToken_MissingTokenOrSecret(t *testing.T) {
- data := url.Values{}
- data.Add("oauth_token", "any_token")
- server := newAccessTokenServer(t, data)
- defer server.Close()
-
- config := &Config{
- Endpoint: Endpoint{
- AccessTokenURL: server.URL,
- },
- }
- accessToken, accessSecret, err := config.AccessToken("request_token", "request_secret", expectedVerifier)
- if assert.Error(t, err) {
- assert.Equal(t, "oauth1: Response missing oauth_token or oauth_token_secret", err.Error())
- }
- assert.Equal(t, "", accessToken)
- assert.Equal(t, "", accessSecret)
-}
-
-func TestParseAuthorizationCallback_GET(t *testing.T) {
- expectedToken := "token"
- expectedVerifier := "verifier"
- server := newMockServer(func(w http.ResponseWriter, req *http.Request) {
- assert.Equal(t, "GET", req.Method)
- // logic under test
- requestToken, verifier, err := ParseAuthorizationCallback(req)
- assert.Nil(t, err)
- assert.Equal(t, expectedToken, requestToken)
- assert.Equal(t, expectedVerifier, verifier)
- })
- defer server.Close()
-
- // OAuth1 provider calls callback url
- url, err := url.Parse(server.URL)
- assert.Nil(t, err)
- query := url.Query()
- query.Add("oauth_token", expectedToken)
- query.Add("oauth_verifier", expectedVerifier)
- url.RawQuery = query.Encode()
- http.Get(url.String())
-}
-
-func TestParseAuthorizationCallback_POST(t *testing.T) {
- expectedToken := "token"
- expectedVerifier := "verifier"
- server := newMockServer(func(w http.ResponseWriter, req *http.Request) {
- assert.Equal(t, "POST", req.Method)
- // logic under test
- requestToken, verifier, err := ParseAuthorizationCallback(req)
- assert.Nil(t, err)
- assert.Equal(t, expectedToken, requestToken)
- assert.Equal(t, expectedVerifier, verifier)
- })
- defer server.Close()
-
- // OAuth1 provider calls callback url
- form := url.Values{}
- form.Add("oauth_token", expectedToken)
- form.Add("oauth_verifier", expectedVerifier)
- http.PostForm(server.URL, form)
-}
-
-func TestParseAuthorizationCallback_MissingTokenOrVerifier(t *testing.T) {
- server := newMockServer(func(w http.ResponseWriter, req *http.Request) {
- assert.Equal(t, "GET", req.Method)
- // logic under test
- requestToken, verifier, err := ParseAuthorizationCallback(req)
- if assert.Error(t, err) {
- assert.Equal(t, "oauth1: Request missing oauth_token or oauth_verifier", err.Error())
- }
- assert.Equal(t, "", requestToken)
- assert.Equal(t, "", verifier)
- })
- defer server.Close()
-
- // OAuth1 provider calls callback url
- url, err := url.Parse(server.URL)
- assert.Nil(t, err)
- query := url.Query()
- query.Add("oauth_token", "any_token")
- query.Add("oauth_verifier", "") // missing oauth_verifier
- url.RawQuery = query.Encode()
- http.Get(url.String())
-}
diff --git a/vendor/src/github.com/dghubble/oauth1/context.go b/vendor/src/github.com/dghubble/oauth1/context.go
deleted file mode 100644
index c4dbc26..0000000
--- a/vendor/src/github.com/dghubble/oauth1/context.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package oauth1
-
-import (
- "net/http"
-
- "golang.org/x/net/context"
-)
-
-type contextKey struct{}
-
-// HTTPClient is the context key to associate an *http.Client value with
-// a context.
-var HTTPClient contextKey
-
-// NoContext is the default context to use in most cases.
-var NoContext = context.TODO()
-
-// contextTransport gets the Transport from the context client or nil.
-func contextTransport(ctx context.Context) http.RoundTripper {
- if client, ok := ctx.Value(HTTPClient).(*http.Client); ok {
- return client.Transport
- }
- return nil
-}
diff --git a/vendor/src/github.com/dghubble/oauth1/context_test.go b/vendor/src/github.com/dghubble/oauth1/context_test.go
deleted file mode 100644
index e81453b..0000000
--- a/vendor/src/github.com/dghubble/oauth1/context_test.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package oauth1
-
-import (
- "net/http"
- "testing"
-
- "github.com/stretchr/testify/assert"
- "golang.org/x/net/context"
-)
-
-func TestContextTransport(t *testing.T) {
- client := &http.Client{
- Transport: http.DefaultTransport,
- }
- ctx := context.WithValue(NoContext, HTTPClient, client)
- assert.Equal(t, http.DefaultTransport, contextTransport(ctx))
-}
-
-func TestContextTransport_NoContextClient(t *testing.T) {
- assert.Nil(t, contextTransport(NoContext))
-}
diff --git a/vendor/src/github.com/dghubble/oauth1/doc.go b/vendor/src/github.com/dghubble/oauth1/doc.go
deleted file mode 100644
index 96b4452..0000000
--- a/vendor/src/github.com/dghubble/oauth1/doc.go
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
-Package oauth1 is a Go implementation of the OAuth1 spec RFC 5849.
-
-It allows end-users to authorize a client (consumer) to access protected
-resources on their behalf (e.g. login) and allows clients to make signed and
-authorized requests on behalf of a user (e.g. API calls).
-
-It takes design cues from golang.org/x/oauth2, providing an http.Client which
-handles request signing and authorization.
-
-Usage
-
-Package oauth1 implements the OAuth1 authorization flow and provides an
-http.Client which can sign and authorize OAuth1 requests.
-
-To implement "Login with X", use the https://github.com/dghubble/gologin
-packages which provide login handlers for OAuth1 and OAuth2 providers.
-
-To call the Twitter, Digits, or Tumblr OAuth1 APIs, use the higher level Go API
-clients.
-
-* https://github.com/dghubble/go-twitter
-* https://github.com/dghubble/go-digits
-* https://github.com/benfb/go-tumblr
-
-Authorization Flow
-
-Perform the OAuth 1 authorization flow to ask a user to grant an application
-access to his/her resources via an access token.
-
- import (
- "github.com/dghubble/oauth1"
- "github.com/dghubble/oauth1/twitter""
- )
- ...
-
- config := oauth1.Config{
- ConsumerKey: "consumerKey",
- ConsumerSecret: "consumerSecret",
- CallbackURL: "http://mysite.com/oauth/twitter/callback",
- Endpoint: twitter.AuthorizeEndpoint,
- }
-
-1. When a user performs an action (e.g. "Login with X" button calls "/login"
-route) get an OAuth1 request token (temporary credentials).
-
- requestToken, requestSecret, err = config.RequestToken()
- // handle err
-
-2. Obtain authorization from the user by redirecting them to the OAuth1
-provider's authorization URL to grant the application access.
-
- authorizationURL, err := config.AuthorizationURL(requestToken)
- // handle err
- http.Redirect(w, req, authorizationURL.String(), htt.StatusFound)
-
-Receive the callback from the OAuth1 provider in a handler.
-
- requestToken, verifier, err := oauth1.ParseAuthorizationCallback(req)
- // handle err
-
-3. Acquire the access token (token credentials) which can later be used
-to make requests on behalf of the user.
-
- accessToken, accessSecret, err := config.AccessToken(requestToken, requestSecret, verifier)
- // handle error
- token := NewToken(accessToken, accessSecret)
-
-Check the examples to see this authorization flow in action from the command
-line, with Twitter PIN-based login and Tumblr login.
-
-Authorized Requests
-
-Use an access Token to make authorized requests on behalf of a user.
-
- import (
- "github.com/dghubble/oauth1"
- )
-
- func main() {
- config := oauth1.NewConfig("consumerKey", "consumerSecret")
- token := oauth1.NewToken("token", "tokenSecret")
-
- // httpClient will automatically authorize http.Request's
- httpClient := config.Client(token)
-
- // example Twitter API request
- path := "https://api.twitter.com/1.1/statuses/home_timeline.json?count=2"
- resp, _ := httpClient.Get(path)
- defer resp.Body.Close()
- body, _ := ioutil.ReadAll(resp.Body)
- fmt.Printf("Raw Response Body:\n%v\n", string(body))
- }
-
-Check the examples to see Twitter and Tumblr requests in action.
-*/
-package oauth1
diff --git a/vendor/src/github.com/dghubble/oauth1/dropbox/dropbox.go b/vendor/src/github.com/dghubble/oauth1/dropbox/dropbox.go
deleted file mode 100644
index 7902cb9..0000000
--- a/vendor/src/github.com/dghubble/oauth1/dropbox/dropbox.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Package dropbox provides constants for using OAuth1 to access Dropbox.
-package dropbox
-
-import (
- "github.com/dghubble/oauth1"
-)
-
-// Endpoint is Dropbox's OAuth 1 endpoint.
-var Endpoint = oauth1.Endpoint{
- RequestTokenURL: "https://api.dropbox.com/1/oauth/request_token",
- AuthorizeURL: "https://api.dropbox.com/1/oauth/authorize",
- AccessTokenURL: "https://api.dropbox.com/1/oauth/access_token",
-}
diff --git a/vendor/src/github.com/dghubble/oauth1/encode.go b/vendor/src/github.com/dghubble/oauth1/encode.go
deleted file mode 100644
index 9c07834..0000000
--- a/vendor/src/github.com/dghubble/oauth1/encode.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package oauth1
-
-import (
- "bytes"
- "fmt"
-)
-
-// PercentEncode percent encodes a string according to RFC 3986 2.1.
-func PercentEncode(input string) string {
- var buf bytes.Buffer
- for _, b := range []byte(input) {
- // if in unreserved set
- if shouldEscape(b) {
- buf.Write([]byte(fmt.Sprintf("%%%02X", b)))
- } else {
- // do not escape, write byte as-is
- buf.WriteByte(b)
- }
- }
- return buf.String()
-}
-
-// shouldEscape returns false if the byte is an unreserved character that
-// should not be escaped and true otherwise, according to RFC 3986 2.1.
-func shouldEscape(c byte) bool {
- // RFC3986 2.3 unreserved characters
- if 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' {
- return false
- }
- switch c {
- case '-', '.', '_', '~':
- return false
- }
- // all other bytes must be escaped
- return true
-}
diff --git a/vendor/src/github.com/dghubble/oauth1/encode_test.go b/vendor/src/github.com/dghubble/oauth1/encode_test.go
deleted file mode 100644
index 6b310fc..0000000
--- a/vendor/src/github.com/dghubble/oauth1/encode_test.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package oauth1
-
-import (
- "testing"
-)
-
-func TestPercentEncode(t *testing.T) {
- cases := []struct {
- input string
- expected string
- }{
- {" ", "%20"},
- {"%", "%25"},
- {"&", "%26"},
- {"-._", "-._"},
- {" /=+", "%20%2F%3D%2B"},
- {"Ladies + Gentlemen", "Ladies%20%2B%20Gentlemen"},
- {"An encoded string!", "An%20encoded%20string%21"},
- {"Dogs, Cats & Mice", "Dogs%2C%20Cats%20%26%20Mice"},
- {"☃", "%E2%98%83"},
- }
- for _, c := range cases {
- if output := PercentEncode(c.input); output != c.expected {
- t.Errorf("expected %s, got %s", c.expected, output)
- }
- }
-}
diff --git a/vendor/src/github.com/dghubble/oauth1/endpoint.go b/vendor/src/github.com/dghubble/oauth1/endpoint.go
deleted file mode 100644
index cb3f667..0000000
--- a/vendor/src/github.com/dghubble/oauth1/endpoint.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package oauth1
-
-// Endpoint represents an OAuth1 provider's (server's) request token,
-// owner authorization, and access token request URLs.
-type Endpoint struct {
- // Request URL (Temporary Credential Request URI)
- RequestTokenURL string
- // Authorize URL (Resource Owner Authorization URI)
- AuthorizeURL string
- // Access Token URL (Token Request URI)
- AccessTokenURL string
-}
diff --git a/vendor/src/github.com/dghubble/oauth1/examples/README.md b/vendor/src/github.com/dghubble/oauth1/examples/README.md
deleted file mode 100644
index 47fb9c4..0000000
--- a/vendor/src/github.com/dghubble/oauth1/examples/README.md
+++ /dev/null
@@ -1,48 +0,0 @@
-
-# OAuth1 Examples
-
-## Twitter
-
-### Authorization Flow (PIN-based)
-
-An application can obtain a Twitter access `Token` for a user by requesting the user grant access via [3-legged](https://dev.twitter.com/oauth/3-legged) or [PIN-based](https://dev.twitter.com/oauth/pin-based) OAuth 1. Here is a command line example showing PIN-based authorization.
-
- export TWITTER_CONSUMER_KEY=xxx
- export TWITTER_CONSUMER_SECRET=xxx
- go run twitter-login.go
-
-The OAuth 1 flow can be used to implement Login with Twitter. Upon receiving an access token in a callback handler on your server, issue a user some form of unforgeable session identifier (i.e. cookie, token). Note that web backends should use a real `CallbackURL`, "oob" is for PIN-based agents such as the command line.
-
-### Authorized Requests
-
-Use the access `Token` to make requests on behalf of a Twitter user.
-
- export TWITTER_CONSUMER_KEY=xxx
- export TWITTER_CONSUMER_SECRET=xxx
- export TWITTER_ACCESS_TOKEN=xxx
- export TWITTER_ACCESS_SECRET=xxx
- go run twitter-request.go
-
-
-## Tumblr
-
-### Authorization Flow
-
-An application can obtain a Tumblr access `Token` to act on behalf of a user. Here is a command line example which requests permission.
-
- export TUMBLR_CONSUMER_KEY=xxx
- export TUMBLR_CONSUMER_SECRET=xxx
- go run tumblr-login.go
-
-### Authorized Requests
-
-Use the access `Token` to make requests on behalf of a Tumblr user.
-
- export TUMBLR_CONSUMER_KEY=xxx
- export TUMBLR_CONSUMER_SECRET=xxx
- export TUMBLR_ACCESS_TOKEN=xxx
- export TUMBLR_ACCESS_SECRET=xxx
- go run tumblr-request.go
-
-Note that only some Tumblr endpoints require OAuth1 signed requests, other endpoints require a special consumer key query parameter or no authorization.
-
diff --git a/vendor/src/github.com/dghubble/oauth1/examples/tumblr-login.go b/vendor/src/github.com/dghubble/oauth1/examples/tumblr-login.go
deleted file mode 100644
index 3b81f77..0000000
--- a/vendor/src/github.com/dghubble/oauth1/examples/tumblr-login.go
+++ /dev/null
@@ -1,68 +0,0 @@
-package main
-
-import (
- "fmt"
- "log"
- "os"
-
- "github.com/dghubble/oauth1"
- "github.com/dghubble/oauth1/tumblr"
-)
-
-var config oauth1.Config
-
-// main performs the Tumblr OAuth1 user flow from the command line
-func main() {
- // read credentials from environment variables
- consumerKey := os.Getenv("TUMBLR_CONSUMER_KEY")
- consumerSecret := os.Getenv("TUMBLR_CONSUMER_SECRET")
- if consumerKey == "" || consumerSecret == "" {
- log.Fatal("Required environment variable missing.")
- }
-
- config = oauth1.Config{
- ConsumerKey: consumerKey,
- ConsumerSecret: consumerSecret,
- // Tumblr does not support oob, uses consumer registered callback
- CallbackURL: "",
- Endpoint: tumblr.Endpoint,
- }
-
- requestToken, requestSecret, err := login()
- if err != nil {
- log.Fatalf("Request Token Phase: %s", err.Error())
- }
- accessToken, err := receivePIN(requestToken, requestSecret)
- if err != nil {
- log.Fatalf("Access Token Phase: %s", err.Error())
- }
-
- fmt.Println("Consumer was granted an access token to act on behalf of a user.")
- fmt.Printf("token: %s\nsecret: %s\n", accessToken.Token, accessToken.TokenSecret)
-}
-
-func login() (requestToken, requestSecret string, err error) {
- requestToken, requestSecret, err = config.RequestToken()
- if err != nil {
- return "", "", err
- }
- authorizationURL, err := config.AuthorizationURL(requestToken)
- if err != nil {
- return "", "", err
- }
- fmt.Printf("Open this URL in your browser:\n%s\n", authorizationURL.String())
- return requestToken, requestSecret, err
-}
-
-func receivePIN(requestToken, requestSecret string) (*oauth1.Token, error) {
- fmt.Printf("Choose whether to grant the application access.\nPaste " +
- "the oauth_verifier parameter (excluding trailing #_=_) from the " +
- "address bar: ")
- var verifier string
- _, err := fmt.Scanf("%s", &verifier)
- accessToken, accessSecret, err := config.AccessToken(requestToken, requestSecret, verifier)
- if err != nil {
- return nil, err
- }
- return oauth1.NewToken(accessToken, accessSecret), err
-}
diff --git a/vendor/src/github.com/dghubble/oauth1/examples/tumblr-request.go b/vendor/src/github.com/dghubble/oauth1/examples/tumblr-request.go
deleted file mode 100644
index 588dd41..0000000
--- a/vendor/src/github.com/dghubble/oauth1/examples/tumblr-request.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package main
-
-import (
- "fmt"
- "io/ioutil"
- "os"
-
- "github.com/dghubble/oauth1"
-)
-
-// Tumblr access token (token credential) requests on behalf of a user
-func main() {
- // read credentials from environment variables
- consumerKey := os.Getenv("TUMBLR_CONSUMER_KEY")
- consumerSecret := os.Getenv("TUMBLR_CONSUMER_SECRET")
- accessToken := os.Getenv("TUMBLR_ACCESS_TOKEN")
- accessSecret := os.Getenv("TUMBLR_ACCESS_SECRET")
- if consumerKey == "" || consumerSecret == "" || accessToken == "" || accessSecret == "" {
- panic("Missing required environment variable")
- }
-
- config := oauth1.NewConfig(consumerKey, consumerSecret)
- token := oauth1.NewToken(accessToken, accessSecret)
-
- // httpClient will automatically authorize http.Request's
- httpClient := config.Client(oauth1.NoContext, token)
-
- // get information about the current authenticated user
- path := "https://api.tumblr.com/v2/user/info"
- resp, _ := httpClient.Get(path)
- defer resp.Body.Close()
- body, _ := ioutil.ReadAll(resp.Body)
- fmt.Printf("Raw Response Body:\n%v\n", string(body))
-
- // note: Tumblr requires OAuth signed requests for particular endpoints,
- // others just need a consumer key query parameter (its janky).
-}
diff --git a/vendor/src/github.com/dghubble/oauth1/examples/twitter-login.go b/vendor/src/github.com/dghubble/oauth1/examples/twitter-login.go
deleted file mode 100644
index 3ef4c3a..0000000
--- a/vendor/src/github.com/dghubble/oauth1/examples/twitter-login.go
+++ /dev/null
@@ -1,75 +0,0 @@
-package main
-
-import (
- "fmt"
- "log"
- "os"
-
- "github.com/dghubble/oauth1"
- twauth "github.com/dghubble/oauth1/twitter"
-)
-
-const outOfBand = "oob"
-
-var config oauth1.Config
-
-// main performs Twitter PIN-based 3-legged OAuth 1 from the command line
-func main() {
- // read credentials from environment variables
- consumerKey := os.Getenv("TWITTER_CONSUMER_KEY")
- consumerSecret := os.Getenv("TWITTER_CONSUMER_SECRET")
- if consumerKey == "" || consumerSecret == "" {
- log.Fatal("Required environment variable missing.")
- }
-
- config = oauth1.Config{
- ConsumerKey: consumerKey,
- ConsumerSecret: consumerSecret,
- CallbackURL: outOfBand,
- Endpoint: twauth.AuthorizeEndpoint,
- }
-
- requestToken, err := login()
- if err != nil {
- log.Fatalf("Request Token Phase: %s", err.Error())
- }
- accessToken, err := receivePIN(requestToken)
- if err != nil {
- log.Fatalf("Access Token Phase: %s", err.Error())
- }
-
- fmt.Println("Consumer was granted an access token to act on behalf of a user.")
- fmt.Printf("token: %s\nsecret: %s\n", accessToken.Token, accessToken.TokenSecret)
-}
-
-func login() (requestToken string, err error) {
- requestToken, _, err = config.RequestToken()
- if err != nil {
- return "", err
- }
- authorizationURL, err := config.AuthorizationURL(requestToken)
- if err != nil {
- return "", err
- }
- fmt.Printf("Open this URL in your browser:\n%s\n", authorizationURL.String())
- return requestToken, err
-}
-
-func receivePIN(requestToken string) (*oauth1.Token, error) {
- fmt.Printf("Paste your PIN here: ")
- var verifier string
- _, err := fmt.Scanf("%s", &verifier)
- if err != nil {
- return nil, err
- }
- // Twitter ignores the oauth_signature on the access token request. The user
- // to which the request (temporary) token corresponds is already known on the
- // server. The request for a request token earlier was validated signed by
- // the consumer. Consumer applications can avoid keeping request token state
- // between authorization granting and callback handling.
- accessToken, accessSecret, err := config.AccessToken(requestToken, "secret does not matter", verifier)
- if err != nil {
- return nil, err
- }
- return oauth1.NewToken(accessToken, accessSecret), err
-}
diff --git a/vendor/src/github.com/dghubble/oauth1/examples/twitter-request.go b/vendor/src/github.com/dghubble/oauth1/examples/twitter-request.go
deleted file mode 100644
index fba66b7..0000000
--- a/vendor/src/github.com/dghubble/oauth1/examples/twitter-request.go
+++ /dev/null
@@ -1,39 +0,0 @@
-package main
-
-import (
- "fmt"
- "io/ioutil"
- "os"
-
- "github.com/dghubble/go-twitter/twitter"
- "github.com/dghubble/oauth1"
-)
-
-// Twitter user-auth requests with an Access Token (token credential)
-func main() {
- // read credentials from environment variables
- consumerKey := os.Getenv("TWITTER_CONSUMER_KEY")
- consumerSecret := os.Getenv("TWITTER_CONSUMER_SECRET")
- accessToken := os.Getenv("TWITTER_ACCESS_TOKEN")
- accessSecret := os.Getenv("TWITTER_ACCESS_SECRET")
- if consumerKey == "" || consumerSecret == "" || accessToken == "" || accessSecret == "" {
- panic("Missing required environment variable")
- }
-
- config := oauth1.NewConfig(consumerKey, consumerSecret)
- token := oauth1.NewToken(accessToken, accessSecret)
-
- // httpClient will automatically authorize http.Request's
- httpClient := config.Client(oauth1.NoContext, token)
-
- path := "https://api.twitter.com/1.1/statuses/home_timeline.json?count=2"
- resp, _ := httpClient.Get(path)
- defer resp.Body.Close()
- body, _ := ioutil.ReadAll(resp.Body)
- fmt.Printf("Raw Response Body:\n%v\n", string(body))
-
- // Nicer: Pass OAuth1 client to go-twitter API
- api := twitter.NewClient(httpClient)
- tweets, _, _ := api.Timelines.HomeTimeline(nil)
- fmt.Printf("User's HOME TIMELINE:\n%+v\n", tweets)
-}
diff --git a/vendor/src/github.com/dghubble/oauth1/reference_test.go b/vendor/src/github.com/dghubble/oauth1/reference_test.go
deleted file mode 100644
index 3fbfc12..0000000
--- a/vendor/src/github.com/dghubble/oauth1/reference_test.go
+++ /dev/null
@@ -1,202 +0,0 @@
-package oauth1
-
-import (
- "fmt"
- "net/http"
- "net/url"
- "strings"
- "testing"
- "time"
-
- "github.com/stretchr/testify/assert"
-)
-
-const (
- expectedVersion = "1.0"
- expectedSignatureMethod = "HMAC-SHA1"
-)
-
-func TestTwitterRequestTokenAuthHeader(t *testing.T) {
- // example from https://dev.twitter.com/web/sign-in/implementing
- var unixTimestamp int64 = 1318467427
- expectedConsumerKey := "cChZNFj6T5R0TigYB9yd1w"
- expectedCallback := "http%3A%2F%2Flocalhost%2Fsign-in-with-twitter%2F"
- expectedSignature := "F1Li3tvehgcraF8DMJ7OyxO4w9Y%3D"
- expectedTimestamp := "1318467427"
- expectedNonce := "ea9ec8429b68d6b77cd5600adbbb0456"
- config := &Config{
- ConsumerKey: expectedConsumerKey,
- ConsumerSecret: "L8qq9PZyRg6ieKGEKhZolGC0vJWLw8iEJ88DRdyOg",
- CallbackURL: "http://localhost/sign-in-with-twitter/",
- Endpoint: Endpoint{
- RequestTokenURL: "https://api.twitter.com/oauth/request_token",
- AuthorizeURL: "https://api.twitter.com/oauth/authorize",
- AccessTokenURL: "https://api.twitter.com/oauth/access_token",
- },
- }
-
- auther := &auther{config, &fixedClock{time.Unix(unixTimestamp, 0)}, &fixedNoncer{expectedNonce}}
- req, err := http.NewRequest("POST", config.Endpoint.RequestTokenURL, nil)
- assert.Nil(t, err)
- err = auther.setRequestTokenAuthHeader(req)
- // assert the request for a request token is signed and has an oauth_callback
- assert.Nil(t, err)
- params := parseOAuthParamsOrFail(t, req.Header.Get(authorizationHeaderParam))
- assert.Equal(t, expectedCallback, params[oauthCallbackParam])
- assert.Equal(t, expectedSignature, params[oauthSignatureParam])
- // additional OAuth parameters
- assert.Equal(t, expectedConsumerKey, params[oauthConsumerKeyParam])
- assert.Equal(t, expectedNonce, params[oauthNonceParam])
- assert.Equal(t, expectedTimestamp, params[oauthTimestampParam])
- assert.Equal(t, expectedVersion, params[oauthVersionParam])
- assert.Equal(t, expectedSignatureMethod, params[oauthSignatureMethodParam])
-}
-
-func TestTwitterAccessTokenAuthHeader(t *testing.T) {
- // example from https://dev.twitter.com/web/sign-in/implementing
- var unixTimestamp int64 = 1318467427
- expectedConsumerKey := "cChZNFj6T5R0TigYB9yd1w"
- expectedRequestToken := "NPcudxy0yU5T3tBzho7iCotZ3cnetKwcTIRlX0iwRl0"
- requestTokenSecret := "veNRnAWe6inFuo8o2u8SLLZLjolYDmDP7SzL0YfYI"
- expectedVerifier := "uw7NjWHT6OJ1MpJOXsHfNxoAhPKpgI8BlYDhxEjIBY"
- expectedSignature := "39cipBtIOHEEnybAR4sATQTpl2I%3D"
- expectedTimestamp := "1318467427"
- expectedNonce := "a9900fe68e2573b27a37f10fbad6a755"
- config := &Config{
- ConsumerKey: expectedConsumerKey,
- ConsumerSecret: "L8qq9PZyRg6ieKGEKhZolGC0vJWLw8iEJ88DRdyOg",
- Endpoint: Endpoint{
- RequestTokenURL: "https://api.twitter.com/oauth/request_token",
- AuthorizeURL: "https://api.twitter.com/oauth/authorize",
- AccessTokenURL: "https://api.twitter.com/oauth/access_token",
- },
- }
-
- auther := &auther{config, &fixedClock{time.Unix(unixTimestamp, 0)}, &fixedNoncer{expectedNonce}}
- req, err := http.NewRequest("POST", config.Endpoint.AccessTokenURL, nil)
- assert.Nil(t, err)
- err = auther.setAccessTokenAuthHeader(req, expectedRequestToken, requestTokenSecret, expectedVerifier)
- // assert the request for an access token is signed and has an oauth_token and verifier
- assert.Nil(t, err)
- params := parseOAuthParamsOrFail(t, req.Header.Get(authorizationHeaderParam))
- assert.Equal(t, expectedRequestToken, params[oauthTokenParam])
- assert.Equal(t, expectedVerifier, params[oauthVerifierParam])
- assert.Equal(t, expectedSignature, params[oauthSignatureParam])
- // additional OAuth parameters
- assert.Equal(t, expectedConsumerKey, params[oauthConsumerKeyParam])
- assert.Equal(t, expectedNonce, params[oauthNonceParam])
- assert.Equal(t, expectedTimestamp, params[oauthTimestampParam])
- assert.Equal(t, expectedVersion, params[oauthVersionParam])
- assert.Equal(t, expectedSignatureMethod, params[oauthSignatureMethodParam])
-}
-
-// example from https://dev.twitter.com/oauth/overview/authorizing-requests,
-// https://dev.twitter.com/oauth/overview/creating-signatures, and
-// https://dev.twitter.com/oauth/application-only
-var unixTimestampOfRequest int64 = 1318622958
-var expectedTwitterConsumerKey = "xvz1evFS4wEEPTGEFPHBog"
-var expectedTwitterOAuthToken = "370773112-GmHxMAgYyLbNEtIKZeRNFsMKPR9EyMZeS9weJAEb"
-var expectedNonce = "kYjzVBB8Y0ZFabxSWbWovY3uYSQ2pTgmZeNu2VS4cg"
-var twitterConfig = &Config{
- ConsumerKey: expectedTwitterConsumerKey,
- ConsumerSecret: "kAcSOqF21Fu85e7zjz7ZN2U4ZRhfV3WpwPAoE3Z7kBw",
- Endpoint: Endpoint{
- RequestTokenURL: "https://api.twitter.com/oauth/request_token",
- AuthorizeURL: "https://api.twitter.com/oauth/authorize",
- AccessTokenURL: "https://api.twitter.com/oauth/access_token",
- },
-}
-
-func TestTwitterParameterString(t *testing.T) {
- auther := &auther{twitterConfig, &fixedClock{time.Unix(unixTimestampOfRequest, 0)}, &fixedNoncer{expectedNonce}}
- values := url.Values{}
- values.Add("status", "Hello Ladies + Gentlemen, a signed OAuth request!")
- // note: the reference example is old and uses api v1 in the URL
- req, err := http.NewRequest("post", "https://api.twitter.com/1/statuses/update.json?include_entities=true", strings.NewReader(values.Encode()))
- assert.Nil(t, err)
- req.Header.Set(contentType, formContentType)
- oauthParams := auther.commonOAuthParams()
- oauthParams[oauthTokenParam] = expectedTwitterOAuthToken
- params, err := collectParameters(req, oauthParams)
- // assert that the parameter string matches the reference
- expectedParameterString := "include_entities=true&oauth_consumer_key=xvz1evFS4wEEPTGEFPHBog&oauth_nonce=kYjzVBB8Y0ZFabxSWbWovY3uYSQ2pTgmZeNu2VS4cg&oauth_signature_method=HMAC-SHA1&oauth_timestamp=1318622958&oauth_token=370773112-GmHxMAgYyLbNEtIKZeRNFsMKPR9EyMZeS9weJAEb&oauth_version=1.0&status=Hello%20Ladies%20%2B%20Gentlemen%2C%20a%20signed%20OAuth%20request%21"
- assert.Nil(t, err)
- assert.Equal(t, expectedParameterString, normalizedParameterString(params))
-}
-
-func TestTwitterSignatureBase(t *testing.T) {
- auther := &auther{twitterConfig, &fixedClock{time.Unix(unixTimestampOfRequest, 0)}, &fixedNoncer{expectedNonce}}
- values := url.Values{}
- values.Add("status", "Hello Ladies + Gentlemen, a signed OAuth request!")
- // note: the reference example is old and uses api v1 in the URL
- req, err := http.NewRequest("post", "https://api.twitter.com/1/statuses/update.json?include_entities=true", strings.NewReader(values.Encode()))
- assert.Nil(t, err)
- req.Header.Set(contentType, formContentType)
- oauthParams := auther.commonOAuthParams()
- oauthParams[oauthTokenParam] = expectedTwitterOAuthToken
- params, err := collectParameters(req, oauthParams)
- signatureBase := signatureBase(req, params)
- // assert that the signature base string matches the reference
- // checks that method is uppercased, url is encoded, parameter string is added, all joined by &
- expectedSignatureBase := "POST&https%3A%2F%2Fapi.twitter.com%2F1%2Fstatuses%2Fupdate.json&include_entities%3Dtrue%26oauth_consumer_key%3Dxvz1evFS4wEEPTGEFPHBog%26oauth_nonce%3DkYjzVBB8Y0ZFabxSWbWovY3uYSQ2pTgmZeNu2VS4cg%26oauth_signature_method%3DHMAC-SHA1%26oauth_timestamp%3D1318622958%26oauth_token%3D370773112-GmHxMAgYyLbNEtIKZeRNFsMKPR9EyMZeS9weJAEb%26oauth_version%3D1.0%26status%3DHello%2520Ladies%2520%252B%2520Gentlemen%252C%2520a%2520signed%2520OAuth%2520request%2521"
- assert.Nil(t, err)
- assert.Equal(t, expectedSignatureBase, signatureBase)
-}
-
-func TestTwitterRequestAuthHeader(t *testing.T) {
- oauthTokenSecret := "LswwdoUaIvS8ltyTt5jkRh4J50vUPVVHtR2YPi5kE"
- expectedSignature := PercentEncode("tnnArxj06cWHq44gCs1OSKk/jLY=")
- expectedTimestamp := "1318622958"
-
- auther := &auther{twitterConfig, &fixedClock{time.Unix(unixTimestampOfRequest, 0)}, &fixedNoncer{expectedNonce}}
- values := url.Values{}
- values.Add("status", "Hello Ladies + Gentlemen, a signed OAuth request!")
-
- accessToken := &Token{expectedTwitterOAuthToken, oauthTokenSecret}
- req, err := http.NewRequest("POST", "https://api.twitter.com/1/statuses/update.json?include_entities=true", strings.NewReader(values.Encode()))
- assert.Nil(t, err)
- req.Header.Set(contentType, formContentType)
- err = auther.setRequestAuthHeader(req, accessToken)
- // assert that request is signed and has an access token token
- assert.Nil(t, err)
- params := parseOAuthParamsOrFail(t, req.Header.Get(authorizationHeaderParam))
- assert.Equal(t, expectedTwitterOAuthToken, params[oauthTokenParam])
- assert.Equal(t, expectedSignature, params[oauthSignatureParam])
- // additional OAuth parameters
- assert.Equal(t, expectedTwitterConsumerKey, params[oauthConsumerKeyParam])
- assert.Equal(t, expectedNonce, params[oauthNonceParam])
- assert.Equal(t, expectedSignatureMethod, params[oauthSignatureMethodParam])
- assert.Equal(t, expectedTimestamp, params[oauthTimestampParam])
- assert.Equal(t, expectedVersion, params[oauthVersionParam])
-}
-
-func parseOAuthParamsOrFail(t *testing.T, authHeader string) map[string]string {
- if !strings.HasPrefix(authHeader, authorizationPrefix) {
- assert.Fail(t, fmt.Sprintf("Expected Authorization header to start with \"%s\", got \"%s\"", authorizationPrefix, authHeader[:len(authorizationPrefix)+1]))
- }
- params := map[string]string{}
- for _, pairStr := range strings.Split(authHeader[len(authorizationPrefix):], ", ") {
- pair := strings.Split(pairStr, "=")
- if len(pair) != 2 {
- assert.Fail(t, "Error parsing OAuth parameter %s", pairStr)
- }
- params[pair[0]] = strings.Replace(pair[1], "\"", "", -1)
- }
- return params
-}
-
-type fixedClock struct {
- now time.Time
-}
-
-func (c *fixedClock) Now() time.Time {
- return c.now
-}
-
-type fixedNoncer struct {
- nonce string
-}
-
-func (n *fixedNoncer) Nonce() string {
- return n.nonce
-}
diff --git a/vendor/src/github.com/dghubble/oauth1/signer.go b/vendor/src/github.com/dghubble/oauth1/signer.go
deleted file mode 100644
index 341c859..0000000
--- a/vendor/src/github.com/dghubble/oauth1/signer.go
+++ /dev/null
@@ -1,62 +0,0 @@
-package oauth1
-
-import (
- "crypto"
- "crypto/hmac"
- "crypto/rand"
- "crypto/rsa"
- "crypto/sha1"
- "encoding/base64"
- "strings"
-)
-
-// A Signer signs messages to create signed OAuth1 Requests.
-type Signer interface {
- // Name returns the name of the signing method.
- Name() string
- // Sign signs the message using the given secret key.
- Sign(key string, message string) (string, error)
-}
-
-// HMACSigner signs messages with an HMAC SHA1 digest, using the concatenated
-// consumer secret and token secret as the key.
-type HMACSigner struct {
- ConsumerSecret string
-}
-
-// Name returns the HMAC-SHA1 method.
-func (s *HMACSigner) Name() string {
- return "HMAC-SHA1"
-}
-
-// Sign creates a concatenated consumer and token secret key and calculates
-// the HMAC digest of the message. Returns the base64 encoded digest bytes.
-func (s *HMACSigner) Sign(tokenSecret, message string) (string, error) {
- signingKey := strings.Join([]string{s.ConsumerSecret, tokenSecret}, "&")
- mac := hmac.New(sha1.New, []byte(signingKey))
- mac.Write([]byte(message))
- signatureBytes := mac.Sum(nil)
- return base64.StdEncoding.EncodeToString(signatureBytes), nil
-}
-
-// RSASigner RSA PKCS1-v1_5 signs SHA1 digests of messages using the given
-// RSA private key.
-type RSASigner struct {
- PrivateKey *rsa.PrivateKey
-}
-
-// Name returns the RSA-SHA1 method.
-func (s *RSASigner) Name() string {
- return "RSA-SHA1"
-}
-
-// Sign uses RSA PKCS1-v1_5 to sign a SHA1 digest of the given message. The
-// tokenSecret is not used with this signing scheme.
-func (s *RSASigner) Sign(tokenSecret, message string) (string, error) {
- digest := sha1.Sum([]byte(message))
- signature, err := rsa.SignPKCS1v15(rand.Reader, s.PrivateKey, crypto.SHA1, digest[:])
- if err != nil {
- return "", err
- }
- return base64.StdEncoding.EncodeToString(signature), nil
-}
diff --git a/vendor/src/github.com/dghubble/oauth1/test b/vendor/src/github.com/dghubble/oauth1/test
deleted file mode 100644
index 10cb57c..0000000
--- a/vendor/src/github.com/dghubble/oauth1/test
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/bin/bash -e
-
-go test . -cover
-go vet ./...
-
-echo "Checking gofmt..."
-FORMATTABLE="$(find . -type f -name '*.go')"
-fmtRes=$(gofmt -l $FORMATTABLE)
-if [ -n "${fmtRes}" ]; then
- echo -e "gofmt checking failed:\n${fmtRes}"
- exit 2
-fi
-
-echo "Checking golint..."
-lintRes=$(go list ./... | xargs -n 1 golint)
-if [ -n "${lintRes}" ]; then
- echo -e "golint checking failed:\n${lintRes}"
- exit 2
-fi
diff --git a/vendor/src/github.com/dghubble/oauth1/token.go b/vendor/src/github.com/dghubble/oauth1/token.go
deleted file mode 100644
index d010d2f..0000000
--- a/vendor/src/github.com/dghubble/oauth1/token.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package oauth1
-
-import (
- "errors"
-)
-
-// A TokenSource can return a Token.
-type TokenSource interface {
- Token() (*Token, error)
-}
-
-// Token is an AccessToken (token credential) which allows a consumer (client)
-// to access resources from an OAuth1 provider server.
-type Token struct {
- Token string
- TokenSecret string
-}
-
-// NewToken returns a new Token with the given token and token secret.
-func NewToken(token, tokenSecret string) *Token {
- return &Token{
- Token: token,
- TokenSecret: tokenSecret,
- }
-}
-
-// StaticTokenSource returns a TokenSource which always returns the same Token.
-// This is appropriate for tokens which do not have a time expiration.
-func StaticTokenSource(token *Token) TokenSource {
- return staticTokenSource{token}
-}
-
-// staticTokenSource is a TokenSource that always returns the same Token.
-type staticTokenSource struct {
- token *Token
-}
-
-func (s staticTokenSource) Token() (*Token, error) {
- if s.token == nil {
- return nil, errors.New("oauth1: Token is nil")
- }
- return s.token, nil
-}
diff --git a/vendor/src/github.com/dghubble/oauth1/token_test.go b/vendor/src/github.com/dghubble/oauth1/token_test.go
deleted file mode 100644
index 140dc45..0000000
--- a/vendor/src/github.com/dghubble/oauth1/token_test.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package oauth1
-
-import (
- "testing"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestNewToken(t *testing.T) {
- expectedToken := "token"
- expectedSecret := "secret"
- tk := NewToken(expectedToken, expectedSecret)
- assert.Equal(t, expectedToken, tk.Token)
- assert.Equal(t, expectedSecret, tk.TokenSecret)
-}
-
-func TestStaticTokenSource(t *testing.T) {
- ts := StaticTokenSource(NewToken("t", "s"))
- tk, err := ts.Token()
- assert.Nil(t, err)
- assert.Equal(t, "t", tk.Token)
-}
-
-func TestStaticTokenSourceEmpty(t *testing.T) {
- ts := StaticTokenSource(nil)
- tk, err := ts.Token()
- assert.Nil(t, tk)
- if assert.Error(t, err) {
- assert.Equal(t, "oauth1: Token is nil", err.Error())
- }
-}
diff --git a/vendor/src/github.com/dghubble/oauth1/transport.go b/vendor/src/github.com/dghubble/oauth1/transport.go
deleted file mode 100644
index c1af993..0000000
--- a/vendor/src/github.com/dghubble/oauth1/transport.go
+++ /dev/null
@@ -1,65 +0,0 @@
-package oauth1
-
-import (
- "fmt"
- "net/http"
-)
-
-// Transport is an http.RoundTripper which makes OAuth1 HTTP requests. It
-// wraps a base RoundTripper and adds an Authorization header using the
-// token from a TokenSource.
-//
-// Transport is a low-level component, most users should use Config to create
-// an http.Client instead.
-type Transport struct {
- // Base is the base RoundTripper used to make HTTP requests. If nil, then
- // http.DefaultTransport is used
- Base http.RoundTripper
- // source supplies the token to use when signing a request
- source TokenSource
- // auther adds OAuth1 Authorization headers to requests
- auther *auther
-}
-
-// RoundTrip authorizes the request with a signed OAuth1 Authorization header
-// using the auther and TokenSource.
-func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
- if t.source == nil {
- return nil, fmt.Errorf("oauth1: Transport's source is nil")
- }
- accessToken, err := t.source.Token()
- if err != nil {
- return nil, err
- }
- if t.auther == nil {
- return nil, fmt.Errorf("oauth1: Transport's auther is nil")
- }
- // RoundTripper should not modify the given request, clone it
- req2 := cloneRequest(req)
- err = t.auther.setRequestAuthHeader(req2, accessToken)
- if err != nil {
- return nil, err
- }
- return t.base().RoundTrip(req2)
-}
-
-func (t *Transport) base() http.RoundTripper {
- if t.Base != nil {
- return t.Base
- }
- return http.DefaultTransport
-}
-
-// cloneRequest returns a clone of the given *http.Request with a shallow
-// copy of struct fields and a deep copy of the Header map.
-func cloneRequest(req *http.Request) *http.Request {
- // shallow copy the struct
- r2 := new(http.Request)
- *r2 = *req
- // deep copy Header so setting a header on the clone does not affect original
- r2.Header = make(http.Header, len(req.Header))
- for k, s := range req.Header {
- r2.Header[k] = append([]string(nil), s...)
- }
- return r2
-}
diff --git a/vendor/src/github.com/dghubble/oauth1/transport_test.go b/vendor/src/github.com/dghubble/oauth1/transport_test.go
deleted file mode 100644
index 4a6fef5..0000000
--- a/vendor/src/github.com/dghubble/oauth1/transport_test.go
+++ /dev/null
@@ -1,117 +0,0 @@
-package oauth1
-
-import (
- "net/http"
- "net/http/httptest"
- "testing"
- "time"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestTransport(t *testing.T) {
- const (
- expectedToken = "access_token"
- expectedConsumerKey = "consumer_key"
- expectedNonce = "some_nonce"
- expectedSignatureMethod = "HMAC-SHA1"
- expectedTimestamp = "123456789"
- )
- server := newMockServer(func(w http.ResponseWriter, req *http.Request) {
- params := parseOAuthParamsOrFail(t, req.Header.Get("Authorization"))
- assert.Equal(t, expectedToken, params[oauthTokenParam])
- assert.Equal(t, expectedConsumerKey, params[oauthConsumerKeyParam])
- assert.Equal(t, expectedNonce, params[oauthNonceParam])
- assert.Equal(t, expectedSignatureMethod, params[oauthSignatureMethodParam])
- assert.Equal(t, expectedTimestamp, params[oauthTimestampParam])
- assert.Equal(t, defaultOauthVersion, params[oauthVersionParam])
- // oauth_signature will vary, httptest.Server uses a random port
- })
- defer server.Close()
-
- config := &Config{
- ConsumerKey: expectedConsumerKey,
- ConsumerSecret: "consumer_secret",
- }
- auther := &auther{
- config: config,
- clock: &fixedClock{time.Unix(123456789, 0)},
- noncer: &fixedNoncer{expectedNonce},
- }
- tr := &Transport{
- source: StaticTokenSource(NewToken(expectedToken, "some_secret")),
- auther: auther,
- }
- client := &http.Client{Transport: tr}
-
- req, err := http.NewRequest("GET", server.URL, nil)
- assert.Nil(t, err)
- _, err = client.Do(req)
- assert.Nil(t, err)
-}
-
-func TestTransport_defaultBaseTransport(t *testing.T) {
- tr := &Transport{
- Base: nil,
- }
- assert.Equal(t, http.DefaultTransport, tr.base())
-}
-
-func TestTransport_customBaseTransport(t *testing.T) {
- expected := &http.Transport{}
- tr := &Transport{
- Base: expected,
- }
- assert.Equal(t, expected, tr.base())
-}
-
-func TestTransport_nilSource(t *testing.T) {
- tr := &Transport{
- source: nil,
- auther: &auther{
- config: &Config{},
- clock: &fixedClock{time.Unix(123456789, 0)},
- noncer: &fixedNoncer{"any_nonce"},
- },
- }
- client := &http.Client{Transport: tr}
- resp, err := client.Get("http://example.com")
- assert.Nil(t, resp)
- if assert.Error(t, err) {
- assert.Equal(t, "Get http://example.com: oauth1: Transport's source is nil", err.Error())
- }
-}
-
-func TestTransport_emptySource(t *testing.T) {
- tr := &Transport{
- source: StaticTokenSource(nil),
- auther: &auther{
- config: &Config{},
- clock: &fixedClock{time.Unix(123456789, 0)},
- noncer: &fixedNoncer{"any_nonce"},
- },
- }
- client := &http.Client{Transport: tr}
- resp, err := client.Get("http://example.com")
- assert.Nil(t, resp)
- if assert.Error(t, err) {
- assert.Equal(t, "Get http://example.com: oauth1: Token is nil", err.Error())
- }
-}
-
-func TestTransport_nilAuther(t *testing.T) {
- tr := &Transport{
- source: StaticTokenSource(&Token{}),
- auther: nil,
- }
- client := &http.Client{Transport: tr}
- resp, err := client.Get("http://example.com")
- assert.Nil(t, resp)
- if assert.Error(t, err) {
- assert.Equal(t, "Get http://example.com: oauth1: Transport's auther is nil", err.Error())
- }
-}
-
-func newMockServer(handler func(w http.ResponseWriter, r *http.Request)) *httptest.Server {
- return httptest.NewServer(http.HandlerFunc(handler))
-}
diff --git a/vendor/src/github.com/dghubble/oauth1/tumblr/tumblr.go b/vendor/src/github.com/dghubble/oauth1/tumblr/tumblr.go
deleted file mode 100644
index 5c020ca..0000000
--- a/vendor/src/github.com/dghubble/oauth1/tumblr/tumblr.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Package tumblr provides constants for using OAuth 1 to access Tumblr.
-package tumblr
-
-import (
- "github.com/dghubble/oauth1"
-)
-
-// Endpoint is Tumblr's OAuth 1a endpoint.
-var Endpoint = oauth1.Endpoint{
- RequestTokenURL: "http://www.tumblr.com/oauth/request_token",
- AuthorizeURL: "http://www.tumblr.com/oauth/authorize",
- AccessTokenURL: "http://www.tumblr.com/oauth/access_token",
-}
diff --git a/vendor/src/github.com/dghubble/oauth1/twitter/twitter.go b/vendor/src/github.com/dghubble/oauth1/twitter/twitter.go
deleted file mode 100644
index ffb0ea6..0000000
--- a/vendor/src/github.com/dghubble/oauth1/twitter/twitter.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Package twitter provides constants for using OAuth1 to access Twitter.
-package twitter
-
-import (
- "github.com/dghubble/oauth1"
-)
-
-// AuthenticateEndpoint is Twitter's OAuth 1 endpoint which uses the
-// oauth/authenticate AuthorizeURL redirect. Logged in users who have granted
-// access are immediately authenticated and redirected to the callback URL.
-var AuthenticateEndpoint = oauth1.Endpoint{
- RequestTokenURL: "https://api.twitter.com/oauth/request_token",
- AuthorizeURL: "https://api.twitter.com/oauth/authenticate",
- AccessTokenURL: "https://api.twitter.com/oauth/access_token",
-}
-
-// AuthorizeEndpoint is Twitter's OAuth 1 endpoint which uses the
-// oauth/authorize AuthorizeURL redirect. Note that this requires users who
-// have granted access previously, to re-grant access at AuthorizeURL.
-// Prefer AuthenticateEndpoint over AuthorizeEndpoint if you are unsure.
-var AuthorizeEndpoint = oauth1.Endpoint{
- RequestTokenURL: "https://api.twitter.com/oauth/request_token",
- AuthorizeURL: "https://api.twitter.com/oauth/authorize",
- AccessTokenURL: "https://api.twitter.com/oauth/access_token",
-}
diff --git a/vendor/src/github.com/dghubble/sling/CHANGES.md b/vendor/src/github.com/dghubble/sling/CHANGES.md
deleted file mode 100644
index d93cfd5..0000000
--- a/vendor/src/github.com/dghubble/sling/CHANGES.md
+++ /dev/null
@@ -1,52 +0,0 @@
-# Sling Changelog
-
-Notable changes between releases.
-
-## latest
-
-* Added Sling `Body` setter to set an `io.Reader` on the Request
-
-## v1.0.0 (2015-05-23)
-
-* Added support for receiving and decoding error JSON structs
-* Renamed Sling `JsonBody` setter to `BodyJSON` (breaking)
-* Renamed Sling `BodyStruct` setter to `BodyForm` (breaking)
-* Renamed Sling fields `httpClient`, `method`, `rawURL`, and `header` to be internal (breaking)
-* Changed `Do` and `Receive` to skip response JSON decoding if "application/json" Content-Type is missing
-* Changed `Sling.Receive(v interface{})` to `Sling.Receive(successV, failureV interface{})` (breaking)
- * Previously `Receive` attempted to decode the response Body in all cases
- * Updated `Receive` will decode the response Body into successV for 2XX responses or decode the Body into failureV for other status codes. Pass a nil `successV` or `failureV` to skip JSON decoding into that value.
- * To upgrade, pass nil for the `failureV` argument or consider defining a JSON tagged struct appropriate for the API endpoint. (e.g. `s.Receive(&issue, nil)`, `s.Receive(&issue, &githubError)`)
- * To retain the old behavior, duplicate the first argument (e.g. s.Receive(&tweet, &tweet))
-* Changed `Sling.Do(http.Request, v interface{})` to `Sling.Do(http.Request, successV, failureV interface{})` (breaking)
- * See the changelog entry about `Receive`, the upgrade path is the same.
-* Removed HEAD, GET, POST, PUT, PATCH, DELETE constants, no reason to export them (breaking)
-
-## v0.4.0 (2015-04-26)
-
-* Improved golint compliance
-* Fixed typos and test printouts
-
-## v0.3.0 (2015-04-21)
-
-* Added BodyStruct method for setting a url encoded form body on the Request
-* Added Add and Set methods for adding or setting Request Headers
-* Added JsonBody method for setting JSON Request Body
-* Improved examples and documentation
-
-## v0.2.0 (2015-04-05)
-
-* Added http.Client setter
-* Added Sling.New() method to return a copy of a Sling
-* Added Base setter and Path extension support
-* Added method setters (Get, Post, Put, Patch, Delete, Head)
-* Added support for encoding URL Query parameters
-* Added example tiny Github API
-* Changed v0.1.0 method signatures and names (breaking)
-* Removed Go 1.0 support
-
-## v0.1.0 (2015-04-01)
-
-* Support decoding JSON responses.
-
-
diff --git a/vendor/src/github.com/dghubble/sling/LICENSE b/vendor/src/github.com/dghubble/sling/LICENSE
deleted file mode 100644
index 2718840..0000000
--- a/vendor/src/github.com/dghubble/sling/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2015 Dalton Hubble
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
\ No newline at end of file
diff --git a/vendor/src/github.com/dghubble/sling/README.md b/vendor/src/github.com/dghubble/sling/README.md
deleted file mode 100644
index 1a8685c..0000000
--- a/vendor/src/github.com/dghubble/sling/README.md
+++ /dev/null
@@ -1,273 +0,0 @@
-
-# Sling [![Build Status](https://travis-ci.org/dghubble/sling.png?branch=master)](https://travis-ci.org/dghubble/sling) [![GoDoc](https://godoc.org/github.com/dghubble/sling?status.png)](https://godoc.org/github.com/dghubble/sling)
-
-
-Sling is a Go HTTP client library for creating and sending API requests.
-
-Slings store HTTP Request properties to simplify sending requests and decoding responses. Check [usage](#usage) or the [examples](examples) to learn how to compose a Sling into your API client.
-
-### Features
-
-* Method Setters: Get/Post/Put/Patch/Delete/Head
-* Add or Set Request Headers
-* Base/Path: Extend a Sling for different endpoints
-* Encode structs into URL query parameters
-* Encode a form or JSON into the Request Body
-* Receive JSON success or failure responses
-
-## Install
-
- go get github.com/dghubble/sling
-
-## Documentation
-
-Read [GoDoc](https://godoc.org/github.com/dghubble/sling)
-
-## Usage
-
-Use a Sling to set path, method, header, query, or body properties and create an `http.Request`.
-
-```go
-type Params struct {
- Count int `url:"count,omitempty"`
-}
-params := &Params{Count: 5}
-
-req, err := sling.New().Get("https://example.com").QueryStruct(params).Request()
-client.Do(req)
-```
-
-### Path
-
-Use `Path` to set or extend the URL for created Requests. Extension means the path will be resolved relative to the existing URL.
-
-```go
-// creates a GET request to https://example.com/foo/bar
-req, err := sling.New().Base("https://example.com/").Path("foo/").Path("bar").Request()
-```
-
-Use `Get`, `Post`, `Put`, `Patch`, `Delete`, or `Head` which are exactly the same as `Path` except they set the HTTP method too.
-
-```go
-req, err := sling.New().Post("http://upload.com/gophers")
-```
-
-### Headers
-
-`Add` or `Set` headers for requests created by a Sling.
-
-```go
-s := sling.New().Base(baseUrl).Set("User-Agent", "Gophergram API Client")
-req, err := s.New().Get("gophergram/list").Request()
-```
-
-### Query
-
-#### QueryStruct
-
-Define [url tagged structs](https://godoc.org/github.com/google/go-querystring/query). Use `QueryStruct` to encode a struct as query parameters on requests.
-
-```go
-// Github Issue Parameters
-type IssueParams struct {
- Filter string `url:"filter,omitempty"`
- State string `url:"state,omitempty"`
- Labels string `url:"labels,omitempty"`
- Sort string `url:"sort,omitempty"`
- Direction string `url:"direction,omitempty"`
- Since string `url:"since,omitempty"`
-}
-```
-
-```go
-githubBase := sling.New().Base("https://api.github.com/").Client(httpClient)
-
-path := fmt.Sprintf("repos/%s/%s/issues", owner, repo)
-params := &IssueParams{Sort: "updated", State: "open"}
-req, err := githubBase.New().Get(path).QueryStruct(params).Request()
-```
-
-### Body
-
-#### JSON Body
-
-Define [JSON tagged structs](https://golang.org/pkg/encoding/json/). Use `BodyJSON` to JSON encode a struct as the Body on requests.
-
-```go
-type IssueRequest struct {
- Title string `json:"title,omitempty"`
- Body string `json:"body,omitempty"`
- Assignee string `json:"assignee,omitempty"`
- Milestone int `json:"milestone,omitempty"`
- Labels []string `json:"labels,omitempty"`
-}
-```
-
-```go
-githubBase := sling.New().Base("https://api.github.com/").Client(httpClient)
-path := fmt.Sprintf("repos/%s/%s/issues", owner, repo)
-
-body := &IssueRequest{
- Title: "Test title",
- Body: "Some issue",
-}
-req, err := githubBase.New().Post(path).BodyJSON(body).Request()
-```
-
-Requests will include an `application/json` Content-Type header.
-
-#### Form Body
-
-Define [url tagged structs](https://godoc.org/github.com/google/go-querystring/query). Use `BodyForm` to form url encode a struct as the Body on requests.
-
-```go
-type StatusUpdateParams struct {
- Status string `url:"status,omitempty"`
- InReplyToStatusId int64 `url:"in_reply_to_status_id,omitempty"`
- MediaIds []int64 `url:"media_ids,omitempty,comma"`
-}
-```
-
-```go
-tweetParams := &StatusUpdateParams{Status: "writing some Go"}
-req, err := twitterBase.New().Post(path).BodyForm(tweetParams).Request()
-```
-
-Requests will include an `application/x-www-form-urlencoded` Content-Type header.
-
-#### Plain Body
-
-Use `Body` to set a plain `io.Reader` on requests created by a Sling.
-
-```go
-body := strings.NewReader("raw body")
-req, err := sling.New().Base("https://example.com").Body(body).Request()
-```
-
-Set a content type header, if desired (e.g. `Set("Content-Type", "text/plain")`).
-
-### Extend a Sling
-
-Each Sling creates a standard `http.Request` (e.g. with some path and query
-params) each time `Request()` is called. You may wish to extend an existing Sling to minimize duplication (e.g. a common client or base url).
-
-Each Sling instance provides a `New()` method which creates an independent copy, so setting properties on the child won't mutate the parent Sling.
-
-```go
-const twitterApi = "https://api.twitter.com/1.1/"
-base := sling.New().Base(twitterApi).Client(authClient)
-
-// statuses/show.json Sling
-tweetShowSling := base.New().Get("statuses/show.json").QueryStruct(params)
-req, err := tweetShowSling.Request()
-
-// statuses/update.json Sling
-tweetPostSling := base.New().Post("statuses/update.json").BodyForm(params)
-req, err := tweetPostSling.Request()
-```
-
-Without the calls to `base.New()`, `tweetShowSling` and `tweetPostSling` would reference the base Sling and POST to
-"https://api.twitter.com/1.1/statuses/show.json/statuses/update.json", which
-is undesired.
-
-Recap: If you wish to *extend* a Sling, create a new child copy with `New()`.
-
-### Sending
-
-#### Receive
-
-Define a JSON struct to decode a type from 2XX success responses. Use `ReceiveSuccess(successV interface{})` to send a new Request and decode the response body into `successV` if it succeeds.
-
-```go
-// Github Issue (abbreviated)
-type Issue struct {
- Title string `json:"title"`
- Body string `json:"body"`
-}
-```
-
-```go
-issues := new([]Issue)
-resp, err := githubBase.New().Get(path).QueryStruct(params).ReceiveSuccess(issues)
-fmt.Println(issues, resp, err)
-```
-
-Most APIs return failure responses with JSON error details. To decode these, define success and failure JSON structs. Use `Receive(successV, failureV interface{})` to send a new Request that will automatically decode the response into the `successV` for 2XX responses or into `failureV` for non-2XX responses.
-
-```go
-type GithubError struct {
- Message string `json:"message"`
- Errors []struct {
- Resource string `json:"resource"`
- Field string `json:"field"`
- Code string `json:"code"`
- } `json:"errors"`
- DocumentationURL string `json:"documentation_url"`
-}
-```
-
-```go
-issues := new([]Issue)
-githubError := new(GithubError)
-resp, err := githubBase.New().Get(path).QueryStruct(params).Receive(issues, githubError)
-fmt.Println(issues, githubError, resp, err)
-```
-
-Pass a nil `successV` or `failureV` argument to skip JSON decoding into that value.
-
-### Build an API
-
-APIs typically define an endpoint (also called a service) for each type of resource. For example, here is a tiny Github IssueService which [lists](https://developer.github.com/v3/issues/#list-issues-for-a-repository) repository issues.
-
-```go
-const baseURL = "https://api.github.com/"
-
-type IssueService struct {
- sling *sling.Sling
-}
-
-func NewIssueService(httpClient *http.Client) *IssueService {
- return &IssueService{
- sling: sling.New().Client(httpClient).Base(baseURL),
- }
-}
-
-func (s *IssueService) ListByRepo(owner, repo string, params *IssueListParams) ([]Issue, *http.Response, error) {
- issues := new([]Issue)
- githubError := new(GithubError)
- path := fmt.Sprintf("repos/%s/%s/issues", owner, repo)
- resp, err := s.sling.New().Get(path).QueryStruct(params).Receive(issues, githubError)
- if err == nil {
- err = githubError
- }
- return *issues, resp, err
-}
-```
-
-## Example APIs using Sling
-
-* Digits [dghubble/go-digits](https://github.com/dghubble/go-digits)
-* GoSquared [drinkin/go-gosquared](https://github.com/drinkin/go-gosquared)
-* Kala [ajvb/kala](https://github.com/ajvb/kala)
-* Parse [fergstar/go-parse](https://github.com/fergstar/go-parse)
-* Rdio [apriendeau/shares](https://github.com/apriendeau/shares)
-* Swagger Generator [swagger-api/swagger-codegen](https://github.com/swagger-api/swagger-codegen)
-* Twitter [dghubble/go-twitter](https://github.com/dghubble/go-twitter)
-* Hacker News [mirceamironenco/go-hackernews](https://github.com/mirceamironenco/go-hackernews)
-* Stacksmith [jesustinoco/go-smith](https://github.com/jesustinoco/go-smith)
-
-Create a Pull Request to add a link to your own API.
-
-## Motivation
-
-Many client libraries follow the lead of [google/go-github](https://github.com/google/go-github) (our inspiration!), but do so by reimplementing logic common to all clients.
-
-This project borrows and abstracts those ideas into a Sling, an agnostic component any API client can use for creating and sending requests.
-
-## Contributing
-
-See the [Contributing Guide](https://gist.github.com/dghubble/be682c123727f70bcfe7).
-
-## License
-
-[MIT License](LICENSE)
diff --git a/vendor/src/github.com/dghubble/sling/doc.go b/vendor/src/github.com/dghubble/sling/doc.go
deleted file mode 100644
index dd2efb7..0000000
--- a/vendor/src/github.com/dghubble/sling/doc.go
+++ /dev/null
@@ -1,179 +0,0 @@
-/*
-Package sling is a Go HTTP client library for creating and sending API requests.
-
-Slings store HTTP Request properties to simplify sending requests and decoding
-responses. Check the examples to learn how to compose a Sling into your API
-client.
-
-Usage
-
-Use a Sling to set path, method, header, query, or body properties and create an
-http.Request.
-
- type Params struct {
- Count int `url:"count,omitempty"`
- }
- params := &Params{Count: 5}
-
- req, err := sling.New().Get("https://example.com").QueryStruct(params).Request()
- client.Do(req)
-
-Path
-
-Use Path to set or extend the URL for created Requests. Extension means the
-path will be resolved relative to the existing URL.
-
- // creates a GET request to https://example.com/foo/bar
- req, err := sling.New().Base("https://example.com/").Path("foo/").Path("bar").Request()
-
-Use Get, Post, Put, Patch, Delete, or Head which are exactly the same as Path
-except they set the HTTP method too.
-
- req, err := sling.New().Post("http://upload.com/gophers")
-
-Headers
-
-Add or Set headers for requests created by a Sling.
-
- s := sling.New().Base(baseUrl).Set("User-Agent", "Gophergram API Client")
- req, err := s.New().Get("gophergram/list").Request()
-
-QueryStruct
-
-Define url parameter structs (https://godoc.org/github.com/google/go-querystring/query).
-Use QueryStruct to encode a struct as query parameters on requests.
-
- // Github Issue Parameters
- type IssueParams struct {
- Filter string `url:"filter,omitempty"`
- State string `url:"state,omitempty"`
- Labels string `url:"labels,omitempty"`
- Sort string `url:"sort,omitempty"`
- Direction string `url:"direction,omitempty"`
- Since string `url:"since,omitempty"`
- }
-
- githubBase := sling.New().Base("https://api.github.com/").Client(httpClient)
-
- path := fmt.Sprintf("repos/%s/%s/issues", owner, repo)
- params := &IssueParams{Sort: "updated", State: "open"}
- req, err := githubBase.New().Get(path).QueryStruct(params).Request()
-
-Json Body
-
-Define JSON tagged structs (https://golang.org/pkg/encoding/json/).
-Use BodyJSON to JSON encode a struct as the Body on requests.
-
- type IssueRequest struct {
- Title string `json:"title,omitempty"`
- Body string `json:"body,omitempty"`
- Assignee string `json:"assignee,omitempty"`
- Milestone int `json:"milestone,omitempty"`
- Labels []string `json:"labels,omitempty"`
- }
-
- githubBase := sling.New().Base("https://api.github.com/").Client(httpClient)
- path := fmt.Sprintf("repos/%s/%s/issues", owner, repo)
-
- body := &IssueRequest{
- Title: "Test title",
- Body: "Some issue",
- }
- req, err := githubBase.New().Post(path).BodyJSON(body).Request()
-
-Requests will include an "application/json" Content-Type header.
-
-Form Body
-
-Define url tagged structs (https://godoc.org/github.com/google/go-querystring/query).
-Use BodyForm to form url encode a struct as the Body on requests.
-
- type StatusUpdateParams struct {
- Status string `url:"status,omitempty"`
- InReplyToStatusId int64 `url:"in_reply_to_status_id,omitempty"`
- MediaIds []int64 `url:"media_ids,omitempty,comma"`
- }
-
- tweetParams := &StatusUpdateParams{Status: "writing some Go"}
- req, err := twitterBase.New().Post(path).BodyForm(tweetParams).Request()
-
-Requests will include an "application/x-www-form-urlencoded" Content-Type
-header.
-
-Plain Body
-
-Use Body to set a plain io.Reader on requests created by a Sling.
-
- body := strings.NewReader("raw body")
- req, err := sling.New().Base("https://example.com").Body(body).Request()
-
-Set a content type header, if desired (e.g. Set("Content-Type", "text/plain")).
-
-Extend a Sling
-
-Each Sling generates an http.Request (say with some path and query params)
-each time Request() is called, based on its state. When creating
-different slings, you may wish to extend an existing Sling to minimize
-duplication (e.g. a common client).
-
-Each Sling instance provides a New() method which creates an independent copy,
-so setting properties on the child won't mutate the parent Sling.
-
- const twitterApi = "https://api.twitter.com/1.1/"
- base := sling.New().Base(twitterApi).Client(authClient)
-
- // statuses/show.json Sling
- tweetShowSling := base.New().Get("statuses/show.json").QueryStruct(params)
- req, err := tweetShowSling.Request()
-
- // statuses/update.json Sling
- tweetPostSling := base.New().Post("statuses/update.json").BodyForm(params)
- req, err := tweetPostSling.Request()
-
-Without the calls to base.New(), tweetShowSling and tweetPostSling would
-reference the base Sling and POST to
-"https://api.twitter.com/1.1/statuses/show.json/statuses/update.json", which
-is undesired.
-
-Recap: If you wish to extend a Sling, create a new child copy with New().
-
-Receive
-
-Define a JSON struct to decode a type from 2XX success responses. Use
-ReceiveSuccess(successV interface{}) to send a new Request and decode the
-response body into successV if it succeeds.
-
- // Github Issue (abbreviated)
- type Issue struct {
- Title string `json:"title"`
- Body string `json:"body"`
- }
-
- issues := new([]Issue)
- resp, err := githubBase.New().Get(path).QueryStruct(params).ReceiveSuccess(issues)
- fmt.Println(issues, resp, err)
-
-Most APIs return failure responses with JSON error details. To decode these,
-define success and failure JSON structs. Use
-Receive(successV, failureV interface{}) to send a new Request that will
-automatically decode the response into the successV for 2XX responses or into
-failureV for non-2XX responses.
-
- type GithubError struct {
- Message string `json:"message"`
- Errors []struct {
- Resource string `json:"resource"`
- Field string `json:"field"`
- Code string `json:"code"`
- } `json:"errors"`
- DocumentationURL string `json:"documentation_url"`
- }
-
- issues := new([]Issue)
- githubError := new(GithubError)
- resp, err := githubBase.New().Get(path).QueryStruct(params).Receive(issues, githubError)
- fmt.Println(issues, githubError, resp, err)
-
-Pass a nil successV or failureV argument to skip JSON decoding into that value.
-*/
-package sling
diff --git a/vendor/src/github.com/dghubble/sling/examples/README.md b/vendor/src/github.com/dghubble/sling/examples/README.md
deleted file mode 100644
index 2bf16c5..0000000
--- a/vendor/src/github.com/dghubble/sling/examples/README.md
+++ /dev/null
@@ -1,19 +0,0 @@
-
-## Example API Client with Sling
-
-Try the example Github API Client.
-
- cd examples
- go get .
-
-List the public issues on the [github.com/golang/go](https://github.com/golang/go) repository.
-
- go run github.go
-
-To list your public and private Github issues, pass your [Github Access Token](https://github.com/settings/tokens)
-
- go run github.go -access-token=xxx
-
-or set the `GITHUB_ACCESS_TOKEN` environment variable.
-
-For a complete Github API, see the excellent [google/go-github](https://github.com/google/go-github) package.
\ No newline at end of file
diff --git a/vendor/src/github.com/dghubble/sling/examples/github.go b/vendor/src/github.com/dghubble/sling/examples/github.go
deleted file mode 100644
index 1dee5ba..0000000
--- a/vendor/src/github.com/dghubble/sling/examples/github.go
+++ /dev/null
@@ -1,161 +0,0 @@
-package main
-
-import (
- "flag"
- "fmt"
- "log"
- "net/http"
- "os"
-
- "github.com/coreos/pkg/flagutil"
- "github.com/dghubble/sling"
- "golang.org/x/oauth2"
-)
-
-const baseURL = "https://api.github.com/"
-
-// Issue is a simplified Github issue
-// https://developer.github.com/v3/issues/#response
-type Issue struct {
- ID int `json:"id"`
- URL string `json:"url"`
- Number int `json:"number"`
- State string `json:"state"`
- Title string `json:"title"`
- Body string `json:"body"`
-}
-
-// GithubError represents a Github API error response
-// https://developer.github.com/v3/#client-errors
-type GithubError struct {
- Message string `json:"message"`
- Errors []struct {
- Resource string `json:"resource"`
- Field string `json:"field"`
- Code string `json:"code"`
- } `json:"errors"`
- DocumentationURL string `json:"documentation_url"`
-}
-
-func (e GithubError) Error() string {
- return fmt.Sprintf("github: %v %+v %v", e.Message, e.Errors, e.DocumentationURL)
-}
-
-// IssueRequest is a simplified issue request
-// https://developer.github.com/v3/issues/#create-an-issue
-type IssueRequest struct {
- Title string `json:"title,omitempty"`
- Body string `json:"body,omitempty"`
- Assignee string `json:"assignee,omitempty"`
- Milestone int `json:"milestone,omitempty"`
- Labels []string `json:"labels,omitempty"`
-}
-
-// IssueListParams are the params for IssueService.List
-// https://developer.github.com/v3/issues/#parameters
-type IssueListParams struct {
- Filter string `url:"filter,omitempty"`
- State string `url:"state,omitempty"`
- Labels string `url:"labels,omitempty"`
- Sort string `url:"sort,omitempty"`
- Direction string `url:"direction,omitempty"`
- Since string `url:"since,omitempty"`
-}
-
-// Services
-
-// IssueService provides methods for creating and reading issues.
-type IssueService struct {
- sling *sling.Sling
-}
-
-// NewIssueService returns a new IssueService.
-func NewIssueService(httpClient *http.Client) *IssueService {
- return &IssueService{
- sling: sling.New().Client(httpClient).Base(baseURL),
- }
-}
-
-// List returns the authenticated user's issues across repos and orgs.
-func (s *IssueService) List(params *IssueListParams) ([]Issue, *http.Response, error) {
- issues := new([]Issue)
- githubError := new(GithubError)
- resp, err := s.sling.New().Path("issues").QueryStruct(params).Receive(issues, githubError)
- if err == nil {
- err = githubError
- }
- return *issues, resp, err
-}
-
-// ListByRepo returns a repository's issues.
-func (s *IssueService) ListByRepo(owner, repo string, params *IssueListParams) ([]Issue, *http.Response, error) {
- issues := new([]Issue)
- githubError := new(GithubError)
- path := fmt.Sprintf("repos/%s/%s/issues", owner, repo)
- resp, err := s.sling.New().Get(path).QueryStruct(params).Receive(issues, githubError)
- if err == nil {
- err = githubError
- }
- return *issues, resp, err
-}
-
-// Create creates a new issue on the specified repository.
-func (s *IssueService) Create(owner, repo string, issueBody *IssueRequest) (*Issue, *http.Response, error) {
- issue := new(Issue)
- githubError := new(GithubError)
- path := fmt.Sprintf("repos/%s/%s/issues", owner, repo)
- resp, err := s.sling.New().Post(path).BodyJSON(issueBody).Receive(issue, githubError)
- if err == nil {
- err = githubError
- }
- return issue, resp, err
-}
-
-// Client to wrap services
-
-// Client is a tiny Github client
-type Client struct {
- IssueService *IssueService
- // other service endpoints...
-}
-
-// NewClient returns a new Client
-func NewClient(httpClient *http.Client) *Client {
- return &Client{
- IssueService: NewIssueService(httpClient),
- }
-}
-
-func main() {
- // Github Unauthenticated API
- client := NewClient(nil)
- params := &IssueListParams{Sort: "updated"}
- issues, _, _ := client.IssueService.ListByRepo("golang", "go", params)
- fmt.Printf("Public golang/go Issues:\n%v\n", issues)
-
- // Github OAuth2 API
- flags := flag.NewFlagSet("github-example", flag.ExitOnError)
- // -access-token=xxx or GITHUB_ACCESS_TOKEN env var
- accessToken := flags.String("access-token", "", "Github Access Token")
- flags.Parse(os.Args[1:])
- flagutil.SetFlagsFromEnv(flags, "GITHUB")
-
- if *accessToken == "" {
- log.Fatal("Github Access Token required to list private issues")
- }
-
- config := &oauth2.Config{}
- token := &oauth2.Token{AccessToken: *accessToken}
- httpClient := config.Client(oauth2.NoContext, token)
-
- client = NewClient(httpClient)
- issues, _, _ = client.IssueService.List(params)
- fmt.Printf("Your Github Issues:\n%v\n", issues)
-
- // body := &IssueRequest{
- // Title: "Test title",
- // Body: "Some test issue",
- // }
- // issue, _, _ := client.IssueService.Create("dghubble", "temp", body)
- // fmt.Println(issue)
-}
diff --git a/vendor/src/github.com/dghubble/sling/sling.go b/vendor/src/github.com/dghubble/sling/sling.go
deleted file mode 100644
index e9370e7..0000000
--- a/vendor/src/github.com/dghubble/sling/sling.go
+++ /dev/null
@@ -1,421 +0,0 @@
-package sling
-
-import (
- "bytes"
- "encoding/base64"
- "encoding/json"
- "io"
- "io/ioutil"
- "net/http"
- "net/url"
- "strings"
-
- goquery "github.com/google/go-querystring/query"
-)
-
-const (
- contentType = "Content-Type"
- jsonContentType = "application/json"
- formContentType = "application/x-www-form-urlencoded"
-)
-
-// Doer executes http requests. It is implemented by *http.Client. You can
-// wrap *http.Client with layers of Doers to form a stack of client-side
-// middleware.
-type Doer interface {
- Do(req *http.Request) (*http.Response, error)
-}
-
-// Sling is an HTTP Request builder and sender.
-type Sling struct {
- // http Client for doing requests
- httpClient Doer
- // HTTP method (GET, POST, etc.)
- method string
- // raw url string for requests
- rawURL string
- // stores key-values pairs to add to request's Headers
- header http.Header
- // url tagged query structs
- queryStructs []interface{}
- // json tagged body struct
- bodyJSON interface{}
- // url tagged body struct (form)
- bodyForm interface{}
- // simply assigned body
- body io.ReadCloser
-}
-
-// New returns a new Sling with an http DefaultClient.
-func New() *Sling {
- return &Sling{
- httpClient: http.DefaultClient,
- method: "GET",
- header: make(http.Header),
- queryStructs: make([]interface{}, 0),
- }
-}
-
-// New returns a copy of a Sling for creating a new Sling with properties
-// from a parent Sling. For example,
-//
-// parentSling := sling.New().Client(client).Base("https://api.io/")
-// fooSling := parentSling.New().Get("foo/")
-// barSling := parentSling.New().Get("bar/")
-//
-// fooSling and barSling will both use the same client, but send requests to
-// https://api.io/foo/ and https://api.io/bar/ respectively.
-//
-// Note that query and body values are copied so if pointer values are used,
-// mutating the original value will mutate the value within the child Sling.
-func (s *Sling) New() *Sling {
- // copy Headers pairs into new Header map
- headerCopy := make(http.Header)
- for k, v := range s.header {
- headerCopy[k] = v
- }
- return &Sling{
- httpClient: s.httpClient,
- method: s.method,
- rawURL: s.rawURL,
- header: headerCopy,
- queryStructs: append([]interface{}{}, s.queryStructs...),
- bodyJSON: s.bodyJSON,
- bodyForm: s.bodyForm,
- body: s.body,
- }
-}
-
-// Http Client
-
-// Client sets the http Client used to do requests. If a nil client is given,
-// the http.DefaultClient will be used.
-func (s *Sling) Client(httpClient *http.Client) *Sling {
- if httpClient == nil {
- return s.Doer(http.DefaultClient)
- }
- return s.Doer(httpClient)
-}
-
-// Doer sets the custom Doer implementation used to do requests.
-// If a nil client is given, the http.DefaultClient will be used.
-func (s *Sling) Doer(doer Doer) *Sling {
- if doer == nil {
- s.httpClient = http.DefaultClient
- } else {
- s.httpClient = doer
- }
- return s
-}
-
-// Method
-
-// Head sets the Sling method to HEAD and sets the given pathURL.
-func (s *Sling) Head(pathURL string) *Sling {
- s.method = "HEAD"
- return s.Path(pathURL)
-}
-
-// Get sets the Sling method to GET and sets the given pathURL.
-func (s *Sling) Get(pathURL string) *Sling {
- s.method = "GET"
- return s.Path(pathURL)
-}
-
-// Post sets the Sling method to POST and sets the given pathURL.
-func (s *Sling) Post(pathURL string) *Sling {
- s.method = "POST"
- return s.Path(pathURL)
-}
-
-// Put sets the Sling method to PUT and sets the given pathURL.
-func (s *Sling) Put(pathURL string) *Sling {
- s.method = "PUT"
- return s.Path(pathURL)
-}
-
-// Patch sets the Sling method to PATCH and sets the given pathURL.
-func (s *Sling) Patch(pathURL string) *Sling {
- s.method = "PATCH"
- return s.Path(pathURL)
-}
-
-// Delete sets the Sling method to DELETE and sets the given pathURL.
-func (s *Sling) Delete(pathURL string) *Sling {
- s.method = "DELETE"
- return s.Path(pathURL)
-}
-
-// Header
-
-// Add adds the key, value pair in Headers, appending values for existing keys
-// to the key's values. Header keys are canonicalized.
-func (s *Sling) Add(key, value string) *Sling {
- s.header.Add(key, value)
- return s
-}
-
-// Set sets the key, value pair in Headers, replacing existing values
-// associated with key. Header keys are canonicalized.
-func (s *Sling) Set(key, value string) *Sling {
- s.header.Set(key, value)
- return s
-}
-
-// SetBasicAuth sets the Authorization header to use HTTP Basic Authentication
-// with the provided username and password. With HTTP Basic Authentication
-// the provided username and password are not encrypted.
-func (s *Sling) SetBasicAuth(username, password string) *Sling {
- return s.Set("Authorization", "Basic "+basicAuth(username, password))
-}
-
-// basicAuth returns the base64 encoded username:password for basic auth copied
-// from net/http.
-func basicAuth(username, password string) string {
- auth := username + ":" + password
- return base64.StdEncoding.EncodeToString([]byte(auth))
-}
-
-// Url
-
-// Base sets the rawURL. If you intend to extend the url with Path,
-// baseUrl should be specified with a trailing slash.
-func (s *Sling) Base(rawURL string) *Sling {
- s.rawURL = rawURL
- return s
-}
-
-// Path extends the rawURL with the given path by resolving the reference to
-// an absolute URL. If parsing errors occur, the rawURL is left unmodified.
-func (s *Sling) Path(path string) *Sling {
- baseURL, baseErr := url.Parse(s.rawURL)
- pathURL, pathErr := url.Parse(path)
- if baseErr == nil && pathErr == nil {
- s.rawURL = baseURL.ResolveReference(pathURL).String()
- return s
- }
- return s
-}
-
-// QueryStruct appends the queryStruct to the Sling's queryStructs. The value
-// pointed to by each queryStruct will be encoded as url query parameters on
-// new requests (see Request()).
-// The queryStruct argument should be a pointer to a url tagged struct. See
-// https://godoc.org/github.com/google/go-querystring/query for details.
-func (s *Sling) QueryStruct(queryStruct interface{}) *Sling {
- if queryStruct != nil {
- s.queryStructs = append(s.queryStructs, queryStruct)
- }
- return s
-}
-
-// Body
-
-// BodyJSON sets the Sling's bodyJSON. The value pointed to by the bodyJSON
-// will be JSON encoded as the Body on new requests (see Request()).
-// The bodyJSON argument should be a pointer to a JSON tagged struct. See
-// https://golang.org/pkg/encoding/json/#MarshalIndent for details.
-func (s *Sling) BodyJSON(bodyJSON interface{}) *Sling {
- if bodyJSON != nil {
- s.bodyJSON = bodyJSON
- s.Set(contentType, jsonContentType)
- }
- return s
-}
-
-// BodyForm sets the Sling's bodyForm. The value pointed to by the bodyForm
-// will be url encoded as the Body on new requests (see Request()).
-// The bodyStruct argument should be a pointer to a url tagged struct. See
-// https://godoc.org/github.com/google/go-querystring/query for details.
-func (s *Sling) BodyForm(bodyForm interface{}) *Sling {
- if bodyForm != nil {
- s.bodyForm = bodyForm
- s.Set(contentType, formContentType)
- }
- return s
-}
-
-// Body sets the Sling's body. The body value will be set as the Body on new
-// requests (see Request()).
-// If the provided body is also an io.Closer, the request Body will be closed
-// by http.Client methods.
-func (s *Sling) Body(body io.Reader) *Sling {
- rc, ok := body.(io.ReadCloser)
- if !ok && body != nil {
- rc = ioutil.NopCloser(body)
- }
- if rc != nil {
- s.body = rc
- }
- return s
-}
-
-// Requests
-
-// Request returns a new http.Request created with the Sling properties.
-// Returns any errors parsing the rawURL, encoding query structs, encoding
-// the body, or creating the http.Request.
-func (s *Sling) Request() (*http.Request, error) {
- reqURL, err := url.Parse(s.rawURL)
- if err != nil {
- return nil, err
- }
- err = addQueryStructs(reqURL, s.queryStructs)
- if err != nil {
- return nil, err
- }
- body, err := s.getRequestBody()
- if err != nil {
- return nil, err
- }
- req, err := http.NewRequest(s.method, reqURL.String(), body)
- if err != nil {
- return nil, err
- }
- addHeaders(req, s.header)
- return req, err
-}
-
-// addQueryStructs parses url tagged query structs using go-querystring to
-// encode them to url.Values and format them onto the url.RawQuery. Any
-// query parsing or encoding errors are returned.
-func addQueryStructs(reqURL *url.URL, queryStructs []interface{}) error {
- urlValues, err := url.ParseQuery(reqURL.RawQuery)
- if err != nil {
- return err
- }
- // encodes query structs into a url.Values map and merges maps
- for _, queryStruct := range queryStructs {
- queryValues, err := goquery.Values(queryStruct)
- if err != nil {
- return err
- }
- for key, values := range queryValues {
- for _, value := range values {
- urlValues.Add(key, value)
- }
- }
- }
- // url.Values format to a sorted "url encoded" string, e.g. "key=val&foo=bar"
- reqURL.RawQuery = urlValues.Encode()
- return nil
-}
-
-// getRequestBody returns the io.Reader which should be used as the body
-// of new Requests.
-func (s *Sling) getRequestBody() (body io.Reader, err error) {
- if s.bodyJSON != nil && s.header.Get(contentType) == jsonContentType {
- body, err = encodeBodyJSON(s.bodyJSON)
- if err != nil {
- return nil, err
- }
- } else if s.bodyForm != nil && s.header.Get(contentType) == formContentType {
- body, err = encodeBodyForm(s.bodyForm)
- if err != nil {
- return nil, err
- }
- } else if s.body != nil {
- body = s.body
- }
- return body, nil
-}
-
-// encodeBodyJSON JSON encodes the value pointed to by bodyJSON into an
-// io.Reader, typically for use as a Request Body.
-func encodeBodyJSON(bodyJSON interface{}) (io.Reader, error) {
- var buf = new(bytes.Buffer)
- if bodyJSON != nil {
- buf = &bytes.Buffer{}
- err := json.NewEncoder(buf).Encode(bodyJSON)
- if err != nil {
- return nil, err
- }
- }
- return buf, nil
-}
-
-// encodeBodyForm url encodes the value pointed to by bodyForm into an
-// io.Reader, typically for use as a Request Body.
-func encodeBodyForm(bodyForm interface{}) (io.Reader, error) {
- values, err := goquery.Values(bodyForm)
- if err != nil {
- return nil, err
- }
- return strings.NewReader(values.Encode()), nil
-}
-
-// addHeaders adds the key, value pairs from the given http.Header to the
-// request. Values for existing keys are appended to the keys values.
-func addHeaders(req *http.Request, header http.Header) {
- for key, values := range header {
- for _, value := range values {
- req.Header.Add(key, value)
- }
- }
-}
-
-// Sending
-
-// ReceiveSuccess creates a new HTTP request and returns the response. Success
-// responses (2XX) are JSON decoded into the value pointed to by successV.
-// Any error creating the request, sending it, or decoding a 2XX response
-// is returned.
-func (s *Sling) ReceiveSuccess(successV interface{}) (*http.Response, error) {
- return s.Receive(successV, nil)
-}
-
-// Receive creates a new HTTP request and returns the response. Success
-// responses (2XX) are JSON decoded into the value pointed to by successV and
-// other responses are JSON decoded into the value pointed to by failureV.
-// Any error creating the request, sending it, or decoding the response is
-// returned.
-// Receive is shorthand for calling Request and Do.
-func (s *Sling) Receive(successV, failureV interface{}) (*http.Response, error) {
- req, err := s.Request()
- if err != nil {
- return nil, err
- }
- return s.Do(req, successV, failureV)
-}
-
-// Do sends an HTTP request and returns the response. Success responses (2XX)
-// are JSON decoded into the value pointed to by successV and other responses
-// are JSON decoded into the value pointed to by failureV.
-// Any error sending the request or decoding the response is returned.
-func (s *Sling) Do(req *http.Request, successV, failureV interface{}) (*http.Response, error) {
- resp, err := s.httpClient.Do(req)
- if err != nil {
- return resp, err
- }
- // when err is nil, resp contains a non-nil resp.Body which must be closed
- defer resp.Body.Close()
- if strings.Contains(resp.Header.Get(contentType), jsonContentType) {
- err = decodeResponseJSON(resp, successV, failureV)
- }
- return resp, err
-}
-
-// decodeResponse decodes response Body into the value pointed to by successV
-// if the response is a success (2XX) or into the value pointed to by failureV
-// otherwise. If the successV or failureV argument to decode into is nil,
-// decoding is skipped.
-// Caller is responsible for closing the resp.Body.
-func decodeResponseJSON(resp *http.Response, successV, failureV interface{}) error {
- if code := resp.StatusCode; 200 <= code && code <= 299 {
- if successV != nil {
- return decodeResponseBodyJSON(resp, successV)
- }
- } else {
- if failureV != nil {
- return decodeResponseBodyJSON(resp, failureV)
- }
- }
- return nil
-}
-
-// decodeResponseBodyJSON JSON decodes a Response Body into the value pointed
-// to by v.
-// Caller must provide a non-nil v and close the resp.Body.
-func decodeResponseBodyJSON(resp *http.Response, v interface{}) error {
- return json.NewDecoder(resp.Body).Decode(v)
-}
diff --git a/vendor/src/github.com/dghubble/sling/sling_test.go b/vendor/src/github.com/dghubble/sling/sling_test.go
deleted file mode 100644
index 0eb0254..0000000
--- a/vendor/src/github.com/dghubble/sling/sling_test.go
+++ /dev/null
@@ -1,863 +0,0 @@
-package sling
-
-import (
- "bytes"
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "math"
- "net/http"
- "net/http/httptest"
- "net/url"
- "reflect"
- "strings"
- "testing"
-)
-
-type FakeParams struct {
- KindName string `url:"kind_name"`
- Count int `url:"count"`
-}
-
-// Url-tagged query struct
-var paramsA = struct {
- Limit int `url:"limit"`
-}{
- 30,
-}
-var paramsB = FakeParams{KindName: "recent", Count: 25}
-
-// Json-tagged model struct
-type FakeModel struct {
- Text string `json:"text,omitempty"`
- FavoriteCount int64 `json:"favorite_count,omitempty"`
- Temperature float64 `json:"temperature,omitempty"`
-}
-
-var modelA = FakeModel{Text: "note", FavoriteCount: 12}
-
-func TestNew(t *testing.T) {
- sling := New()
- if sling.httpClient != http.DefaultClient {
- t.Errorf("expected %v, got %v", http.DefaultClient, sling.httpClient)
- }
- if sling.header == nil {
- t.Errorf("Header map not initialized with make")
- }
- if sling.queryStructs == nil {
- t.Errorf("queryStructs not initialized with make")
- }
-}
-
-func TestSlingNew(t *testing.T) {
- cases := []*Sling{
- &Sling{httpClient: &http.Client{}, method: "GET", rawURL: "http://example.com"},
- &Sling{httpClient: nil, method: "", rawURL: "http://example.com"},
- &Sling{queryStructs: make([]interface{}, 0)},
- &Sling{queryStructs: []interface{}{paramsA}},
- &Sling{queryStructs: []interface{}{paramsA, paramsB}},
- &Sling{bodyJSON: &FakeModel{Text: "a"}},
- &Sling{bodyJSON: FakeModel{Text: "a"}},
- &Sling{bodyJSON: nil},
- New().Add("Content-Type", "application/json"),
- New().Add("A", "B").Add("a", "c").New(),
- New().Add("A", "B").New().Add("a", "c"),
- New().BodyForm(paramsB),
- New().BodyForm(paramsB).New(),
- }
- for _, sling := range cases {
- child := sling.New()
- if child.httpClient != sling.httpClient {
- t.Errorf("expected %v, got %v", sling.httpClient, child.httpClient)
- }
- if child.method != sling.method {
- t.Errorf("expected %s, got %s", sling.method, child.method)
- }
- if child.rawURL != sling.rawURL {
- t.Errorf("expected %s, got %s", sling.rawURL, child.rawURL)
- }
- // Header should be a copy of parent Sling header. For example, calling
- // baseSling.Add("k","v") should not mutate previously created child Slings
- if sling.header != nil {
- // struct literal cases don't init Header in usual way, skip header check
- if !reflect.DeepEqual(sling.header, child.header) {
- t.Errorf("not DeepEqual: expected %v, got %v", sling.header, child.header)
- }
- sling.header.Add("K", "V")
- if child.header.Get("K") != "" {
- t.Errorf("child.header was a reference to original map, should be copy")
- }
- }
- // queryStruct slice should be a new slice with a copy of the contents
- if len(sling.queryStructs) > 0 {
- // mutating one slice should not mutate the other
- child.queryStructs[0] = nil
- if sling.queryStructs[0] == nil {
- t.Errorf("child.queryStructs was a re-slice, expected slice with copied contents")
- }
- }
- // bodyJSON should be copied
- if child.bodyJSON != sling.bodyJSON {
- t.Errorf("expected %v, got %v", sling.bodyJSON, child.bodyJSON)
- }
- // bodyForm should be copied
- if child.bodyForm != sling.bodyForm {
- t.Errorf("expected %v, got %v", sling.bodyForm, child.bodyForm)
- }
- }
-}
-
-func TestClientSetter(t *testing.T) {
- developerClient := &http.Client{}
- cases := []struct {
- input *http.Client
- expected *http.Client
- }{
- {nil, http.DefaultClient},
- {developerClient, developerClient},
- }
- for _, c := range cases {
- sling := New()
- sling.Client(c.input)
- if sling.httpClient != c.expected {
- t.Errorf("input %v, expected %v, got %v", c.input, c.expected, sling.httpClient)
- }
- }
-}
-
-func TestDoerSetter(t *testing.T) {
- developerClient := &http.Client{}
- cases := []struct {
- input Doer
- expected Doer
- }{
- {nil, http.DefaultClient},
- {developerClient, developerClient},
- }
- for _, c := range cases {
- sling := New()
- sling.Doer(c.input)
- if sling.httpClient != c.expected {
- t.Errorf("input %v, expected %v, got %v", c.input, c.expected, sling.httpClient)
- }
- }
-}
-
-func TestBaseSetter(t *testing.T) {
- cases := []string{"http://a.io/", "http://b.io", "/path", "path", ""}
- for _, base := range cases {
- sling := New().Base(base)
- if sling.rawURL != base {
- t.Errorf("expected %s, got %s", base, sling.rawURL)
- }
- }
-}
-
-func TestPathSetter(t *testing.T) {
- cases := []struct {
- rawURL string
- path string
- expectedRawURL string
- }{
- {"http://a.io/", "foo", "http://a.io/foo"},
- {"http://a.io/", "/foo", "http://a.io/foo"},
- {"http://a.io", "foo", "http://a.io/foo"},
- {"http://a.io", "/foo", "http://a.io/foo"},
- {"http://a.io/foo/", "bar", "http://a.io/foo/bar"},
- // rawURL should end in trailing slash if it is to be Path extended
- {"http://a.io/foo", "bar", "http://a.io/bar"},
- {"http://a.io/foo", "/bar", "http://a.io/bar"},
- // path extension is absolute
- {"http://a.io", "http://b.io/", "http://b.io/"},
- {"http://a.io/", "http://b.io/", "http://b.io/"},
- {"http://a.io", "http://b.io", "http://b.io"},
- {"http://a.io/", "http://b.io", "http://b.io"},
- // empty base, empty path
- {"", "http://b.io", "http://b.io"},
- {"http://a.io", "", "http://a.io"},
- {"", "", ""},
- }
- for _, c := range cases {
- sling := New().Base(c.rawURL).Path(c.path)
- if sling.rawURL != c.expectedRawURL {
- t.Errorf("expected %s, got %s", c.expectedRawURL, sling.rawURL)
- }
- }
-}
-
-func TestMethodSetters(t *testing.T) {
- cases := []struct {
- sling *Sling
- expectedMethod string
- }{
- {New().Path("http://a.io"), "GET"},
- {New().Head("http://a.io"), "HEAD"},
- {New().Get("http://a.io"), "GET"},
- {New().Post("http://a.io"), "POST"},
- {New().Put("http://a.io"), "PUT"},
- {New().Patch("http://a.io"), "PATCH"},
- {New().Delete("http://a.io"), "DELETE"},
- }
- for _, c := range cases {
- if c.sling.method != c.expectedMethod {
- t.Errorf("expected method %s, got %s", c.expectedMethod, c.sling.method)
- }
- }
-}
-
-func TestAddHeader(t *testing.T) {
- cases := []struct {
- sling *Sling
- expectedHeader map[string][]string
- }{
- {New().Add("authorization", "OAuth key=\"value\""), map[string][]string{"Authorization": []string{"OAuth key=\"value\""}}},
- // header keys should be canonicalized
- {New().Add("content-tYPE", "application/json").Add("User-AGENT", "sling"), map[string][]string{"Content-Type": []string{"application/json"}, "User-Agent": []string{"sling"}}},
- // values for existing keys should be appended
- {New().Add("A", "B").Add("a", "c"), map[string][]string{"A": []string{"B", "c"}}},
- // Add should add to values for keys added by parent Slings
- {New().Add("A", "B").Add("a", "c").New(), map[string][]string{"A": []string{"B", "c"}}},
- {New().Add("A", "B").New().Add("a", "c"), map[string][]string{"A": []string{"B", "c"}}},
- }
- for _, c := range cases {
- // type conversion from header to alias'd map for deep equality comparison
- headerMap := map[string][]string(c.sling.header)
- if !reflect.DeepEqual(c.expectedHeader, headerMap) {
- t.Errorf("not DeepEqual: expected %v, got %v", c.expectedHeader, headerMap)
- }
- }
-}
-
-func TestSetHeader(t *testing.T) {
- cases := []struct {
- sling *Sling
- expectedHeader map[string][]string
- }{
- // should replace existing values associated with key
- {New().Add("A", "B").Set("a", "c"), map[string][]string{"A": []string{"c"}}},
- {New().Set("content-type", "A").Set("Content-Type", "B"), map[string][]string{"Content-Type": []string{"B"}}},
- // Set should replace values received by copying parent Slings
- {New().Set("A", "B").Add("a", "c").New(), map[string][]string{"A": []string{"B", "c"}}},
- {New().Add("A", "B").New().Set("a", "c"), map[string][]string{"A": []string{"c"}}},
- }
- for _, c := range cases {
- // type conversion from Header to alias'd map for deep equality comparison
- headerMap := map[string][]string(c.sling.header)
- if !reflect.DeepEqual(c.expectedHeader, headerMap) {
- t.Errorf("not DeepEqual: expected %v, got %v", c.expectedHeader, headerMap)
- }
- }
-}
-
-func TestBasicAuth(t *testing.T) {
- cases := []struct {
- sling *Sling
- expectedAuth []string
- }{
- // basic auth: username & password
- {New().SetBasicAuth("Aladdin", "open sesame"), []string{"Aladdin", "open sesame"}},
- // empty username
- {New().SetBasicAuth("", "secret"), []string{"", "secret"}},
- // empty password
- {New().SetBasicAuth("admin", ""), []string{"admin", ""}},
- }
- for _, c := range cases {
- req, err := c.sling.Request()
- if err != nil {
- t.Errorf("unexpected error when building Request with .SetBasicAuth()")
- }
- username, password, ok := req.BasicAuth()
- if !ok {
- t.Errorf("basic auth missing when expected")
- }
- auth := []string{username, password}
- if !reflect.DeepEqual(c.expectedAuth, auth) {
- t.Errorf("not DeepEqual: expected %v, got %v", c.expectedAuth, auth)
- }
- }
-}
-
-func TestQueryStructSetter(t *testing.T) {
- cases := []struct {
- sling *Sling
- expectedStructs []interface{}
- }{
- {New(), []interface{}{}},
- {New().QueryStruct(nil), []interface{}{}},
- {New().QueryStruct(paramsA), []interface{}{paramsA}},
- {New().QueryStruct(paramsA).QueryStruct(paramsA), []interface{}{paramsA, paramsA}},
- {New().QueryStruct(paramsA).QueryStruct(paramsB), []interface{}{paramsA, paramsB}},
- {New().QueryStruct(paramsA).New(), []interface{}{paramsA}},
- {New().QueryStruct(paramsA).New().QueryStruct(paramsB), []interface{}{paramsA, paramsB}},
- }
-
- for _, c := range cases {
- if count := len(c.sling.queryStructs); count != len(c.expectedStructs) {
- t.Errorf("expected length %d, got %d", len(c.expectedStructs), count)
- }
- check:
- for _, expected := range c.expectedStructs {
- for _, param := range c.sling.queryStructs {
- if param == expected {
- continue check
- }
- }
- t.Errorf("expected to find %v in %v", expected, c.sling.queryStructs)
- }
- }
-}
-
-func TestBodyJSONSetter(t *testing.T) {
- fakeModel := &FakeModel{}
- cases := []struct {
- initial interface{}
- input interface{}
- expected interface{}
- }{
- // json tagged struct is set as bodyJSON
- {nil, fakeModel, fakeModel},
- // nil argument to bodyJSON does not replace existing bodyJSON
- {fakeModel, nil, fakeModel},
- // nil bodyJSON remains nil
- {nil, nil, nil},
- }
- for _, c := range cases {
- sling := New()
- sling.bodyJSON = c.initial
- sling.BodyJSON(c.input)
- if sling.bodyJSON != c.expected {
- t.Errorf("expected %v, got %v", c.expected, sling.bodyJSON)
- }
- // Header Content-Type should be application/json if bodyJSON arg was non-nil
- if c.input != nil && sling.header.Get(contentType) != jsonContentType {
- t.Errorf("Incorrect or missing header, expected %s, got %s", jsonContentType, sling.header.Get(contentType))
- } else if c.input == nil && sling.header.Get(contentType) != "" {
- t.Errorf("did not expect a Content-Type header, got %s", sling.header.Get(contentType))
- }
- }
-}
-
-func TestBodyFormSetter(t *testing.T) {
- cases := []struct {
- initial interface{}
- input interface{}
- expected interface{}
- }{
- // url tagged struct is set as bodyStruct
- {nil, paramsB, paramsB},
- // nil argument to bodyStruct does not replace existing bodyStruct
- {paramsB, nil, paramsB},
- // nil bodyStruct remains nil
- {nil, nil, nil},
- }
- for _, c := range cases {
- sling := New()
- sling.bodyForm = c.initial
- sling.BodyForm(c.input)
- if sling.bodyForm != c.expected {
- t.Errorf("expected %v, got %v", c.expected, sling.bodyForm)
- }
- // Content-Type should be application/x-www-form-urlencoded if bodyStruct was non-nil
- if c.input != nil && sling.header.Get(contentType) != formContentType {
- t.Errorf("Incorrect or missing header, expected %s, got %s", formContentType, sling.header.Get(contentType))
- } else if c.input == nil && sling.header.Get(contentType) != "" {
- t.Errorf("did not expect a Content-Type header, got %s", sling.header.Get(contentType))
- }
- }
-
-}
-
-func TestBodySetter(t *testing.T) {
- var testInput = ioutil.NopCloser(strings.NewReader("test"))
- cases := []struct {
- initial io.ReadCloser
- input io.Reader
- expected io.Reader
- }{
- // nil body is overriden by a set body
- {nil, testInput, testInput},
- // initial body is not overriden by nil body
- {testInput, nil, testInput},
- // nil body is returned unaltered
- {nil, nil, nil},
- }
- for _, c := range cases {
- sling := New()
- sling.body = c.initial
- sling.Body(c.input)
- body, err := sling.getRequestBody()
- if err != nil {
- t.Errorf("expected nil, got %v", err)
- }
- if body != c.expected {
- t.Errorf("expected %v, got %v", c.expected, body)
- }
- }
-}
-
-func TestRequest_urlAndMethod(t *testing.T) {
- cases := []struct {
- sling *Sling
- expectedMethod string
- expectedURL string
- expectedErr error
- }{
- {New().Base("http://a.io"), "GET", "http://a.io", nil},
- {New().Path("http://a.io"), "GET", "http://a.io", nil},
- {New().Get("http://a.io"), "GET", "http://a.io", nil},
- {New().Put("http://a.io"), "PUT", "http://a.io", nil},
- {New().Base("http://a.io/").Path("foo"), "GET", "http://a.io/foo", nil},
- {New().Base("http://a.io/").Post("foo"), "POST", "http://a.io/foo", nil},
- // if relative path is an absolute url, base is ignored
- {New().Base("http://a.io").Path("http://b.io"), "GET", "http://b.io", nil},
- {New().Path("http://a.io").Path("http://b.io"), "GET", "http://b.io", nil},
- // last method setter takes priority
- {New().Get("http://b.io").Post("http://a.io"), "POST", "http://a.io", nil},
- {New().Post("http://a.io/").Put("foo/").Delete("bar"), "DELETE", "http://a.io/foo/bar", nil},
- // last Base setter takes priority
- {New().Base("http://a.io").Base("http://b.io"), "GET", "http://b.io", nil},
- // Path setters are additive
- {New().Base("http://a.io/").Path("foo/").Path("bar"), "GET", "http://a.io/foo/bar", nil},
- {New().Path("http://a.io/").Path("foo/").Path("bar"), "GET", "http://a.io/foo/bar", nil},
- // removes extra '/' between base and ref url
- {New().Base("http://a.io/").Get("/foo"), "GET", "http://a.io/foo", nil},
- }
- for _, c := range cases {
- req, err := c.sling.Request()
- if err != c.expectedErr {
- t.Errorf("expected error %v, got %v for %+v", c.expectedErr, err, c.sling)
- }
- if req.URL.String() != c.expectedURL {
- t.Errorf("expected url %s, got %s for %+v", c.expectedURL, req.URL.String(), c.sling)
- }
- if req.Method != c.expectedMethod {
- t.Errorf("expected method %s, got %s for %+v", c.expectedMethod, req.Method, c.sling)
- }
- }
-}
-
-func TestRequest_queryStructs(t *testing.T) {
- cases := []struct {
- sling *Sling
- expectedURL string
- }{
- {New().Base("http://a.io").QueryStruct(paramsA), "http://a.io?limit=30"},
- {New().Base("http://a.io").QueryStruct(paramsA).QueryStruct(paramsB), "http://a.io?count=25&kind_name=recent&limit=30"},
- {New().Base("http://a.io/").Path("foo?path=yes").QueryStruct(paramsA), "http://a.io/foo?limit=30&path=yes"},
- {New().Base("http://a.io").QueryStruct(paramsA).New(), "http://a.io?limit=30"},
- {New().Base("http://a.io").QueryStruct(paramsA).New().QueryStruct(paramsB), "http://a.io?count=25&kind_name=recent&limit=30"},
- }
- for _, c := range cases {
- req, _ := c.sling.Request()
- if req.URL.String() != c.expectedURL {
- t.Errorf("expected url %s, got %s for %+v", c.expectedURL, req.URL.String(), c.sling)
- }
- }
-}
-
-func TestRequest_body(t *testing.T) {
- cases := []struct {
- sling *Sling
- expectedBody string // expected Body io.Reader as a string
- expectedContentType string
- }{
- // BodyJSON
- {New().BodyJSON(modelA), "{\"text\":\"note\",\"favorite_count\":12}\n", jsonContentType},
- {New().BodyJSON(&modelA), "{\"text\":\"note\",\"favorite_count\":12}\n", jsonContentType},
- {New().BodyJSON(&FakeModel{}), "{}\n", jsonContentType},
- {New().BodyJSON(FakeModel{}), "{}\n", jsonContentType},
- // BodyJSON overrides existing values
- {New().BodyJSON(&FakeModel{}).BodyJSON(&FakeModel{Text: "msg"}), "{\"text\":\"msg\"}\n", jsonContentType},
- // BodyForm
- {New().BodyForm(paramsA), "limit=30", formContentType},
- {New().BodyForm(paramsB), "count=25&kind_name=recent", formContentType},
- {New().BodyForm(¶msB), "count=25&kind_name=recent", formContentType},
- // BodyForm overrides existing values
- {New().BodyForm(paramsA).New().BodyForm(paramsB), "count=25&kind_name=recent", formContentType},
- // Mixture of BodyJSON and BodyForm prefers body setter called last with a non-nil argument
- {New().BodyForm(paramsB).New().BodyJSON(modelA), "{\"text\":\"note\",\"favorite_count\":12}\n", jsonContentType},
- {New().BodyJSON(modelA).New().BodyForm(paramsB), "count=25&kind_name=recent", formContentType},
- {New().BodyForm(paramsB).New().BodyJSON(nil), "count=25&kind_name=recent", formContentType},
- {New().BodyJSON(modelA).New().BodyForm(nil), "{\"text\":\"note\",\"favorite_count\":12}\n", jsonContentType},
- // Body
- {New().Body(strings.NewReader("this-is-a-test")), "this-is-a-test", ""},
- {New().Body(strings.NewReader("a")).Body(strings.NewReader("b")), "b", ""},
- }
- for _, c := range cases {
- req, _ := c.sling.Request()
- buf := new(bytes.Buffer)
- buf.ReadFrom(req.Body)
- // req.Body should have contained the expectedBody string
- if value := buf.String(); value != c.expectedBody {
- t.Errorf("expected Request.Body %s, got %s", c.expectedBody, value)
- }
- // Header Content-Type should be expectedContentType ("" means no contentType expected)
- if actualHeader := req.Header.Get(contentType); actualHeader != c.expectedContentType && c.expectedContentType != "" {
- t.Errorf("Incorrect or missing header, expected %s, got %s", c.expectedContentType, actualHeader)
- }
- }
-}
-
-func TestRequest_bodyNoData(t *testing.T) {
- // test that Body is left nil when no bodyJSON or bodyStruct set
- slings := []*Sling{
- New(),
- New().BodyJSON(nil),
- New().BodyForm(nil),
- }
- for _, sling := range slings {
- req, _ := sling.Request()
- if req.Body != nil {
- t.Errorf("expected nil Request.Body, got %v", req.Body)
- }
- // Header Content-Type should not be set when bodyJSON argument was nil or never called
- if actualHeader := req.Header.Get(contentType); actualHeader != "" {
- t.Errorf("did not expect a Content-Type header, got %s", actualHeader)
- }
- }
-}
-
-func TestRequest_bodyEncodeErrors(t *testing.T) {
- cases := []struct {
- sling *Sling
- expectedErr error
- }{
- // check that Encode errors are propagated, illegal JSON field
- {New().BodyJSON(FakeModel{Temperature: math.Inf(1)}), errors.New("json: unsupported value: +Inf")},
- }
- for _, c := range cases {
- req, err := c.sling.Request()
- if err == nil || err.Error() != c.expectedErr.Error() {
- t.Errorf("expected error %v, got %v", c.expectedErr, err)
- }
- if req != nil {
- t.Errorf("expected nil Request, got %+v", req)
- }
- }
-}
-
-func TestRequest_headers(t *testing.T) {
- cases := []struct {
- sling *Sling
- expectedHeader map[string][]string
- }{
- {New().Add("authorization", "OAuth key=\"value\""), map[string][]string{"Authorization": []string{"OAuth key=\"value\""}}},
- // header keys should be canonicalized
- {New().Add("content-tYPE", "application/json").Add("User-AGENT", "sling"), map[string][]string{"Content-Type": []string{"application/json"}, "User-Agent": []string{"sling"}}},
- // values for existing keys should be appended
- {New().Add("A", "B").Add("a", "c"), map[string][]string{"A": []string{"B", "c"}}},
- // Add should add to values for keys added by parent Slings
- {New().Add("A", "B").Add("a", "c").New(), map[string][]string{"A": []string{"B", "c"}}},
- {New().Add("A", "B").New().Add("a", "c"), map[string][]string{"A": []string{"B", "c"}}},
- // Add and Set
- {New().Add("A", "B").Set("a", "c"), map[string][]string{"A": []string{"c"}}},
- {New().Set("content-type", "A").Set("Content-Type", "B"), map[string][]string{"Content-Type": []string{"B"}}},
- // Set should replace values received by copying parent Slings
- {New().Set("A", "B").Add("a", "c").New(), map[string][]string{"A": []string{"B", "c"}}},
- {New().Add("A", "B").New().Set("a", "c"), map[string][]string{"A": []string{"c"}}},
- }
- for _, c := range cases {
- req, _ := c.sling.Request()
- // type conversion from Header to alias'd map for deep equality comparison
- headerMap := map[string][]string(req.Header)
- if !reflect.DeepEqual(c.expectedHeader, headerMap) {
- t.Errorf("not DeepEqual: expected %v, got %v", c.expectedHeader, headerMap)
- }
- }
-}
-
-func TestAddQueryStructs(t *testing.T) {
- cases := []struct {
- rawurl string
- queryStructs []interface{}
- expected string
- }{
- {"http://a.io", []interface{}{}, "http://a.io"},
- {"http://a.io", []interface{}{paramsA}, "http://a.io?limit=30"},
- {"http://a.io", []interface{}{paramsA, paramsA}, "http://a.io?limit=30&limit=30"},
- {"http://a.io", []interface{}{paramsA, paramsB}, "http://a.io?count=25&kind_name=recent&limit=30"},
- // don't blow away query values on the rawURL (parsed into RawQuery)
- {"http://a.io?initial=7", []interface{}{paramsA}, "http://a.io?initial=7&limit=30"},
- }
- for _, c := range cases {
- reqURL, _ := url.Parse(c.rawurl)
- addQueryStructs(reqURL, c.queryStructs)
- if reqURL.String() != c.expected {
- t.Errorf("expected %s, got %s", c.expected, reqURL.String())
- }
- }
-}
-
-// Sending
-
-type APIError struct {
- Message string `json:"message"`
- Code int `json:"code"`
-}
-
-func TestDo_onSuccess(t *testing.T) {
- const expectedText = "Some text"
- const expectedFavoriteCount int64 = 24
-
- client, mux, server := testServer()
- defer server.Close()
- mux.HandleFunc("/success", func(w http.ResponseWriter, r *http.Request) {
- w.Header().Set("Content-Type", "application/json")
- fmt.Fprintf(w, `{"text": "Some text", "favorite_count": 24}`)
- })
-
- sling := New().Client(client)
- req, _ := http.NewRequest("GET", "http://example.com/success", nil)
-
- model := new(FakeModel)
- apiError := new(APIError)
- resp, err := sling.Do(req, model, apiError)
-
- if err != nil {
- t.Errorf("expected nil, got %v", err)
- }
- if resp.StatusCode != 200 {
- t.Errorf("expected %d, got %d", 200, resp.StatusCode)
- }
- if model.Text != expectedText {
- t.Errorf("expected %s, got %s", expectedText, model.Text)
- }
- if model.FavoriteCount != expectedFavoriteCount {
- t.Errorf("expected %d, got %d", expectedFavoriteCount, model.FavoriteCount)
- }
-}
-
-func TestDo_onSuccessWithNilValue(t *testing.T) {
- client, mux, server := testServer()
- defer server.Close()
- mux.HandleFunc("/success", func(w http.ResponseWriter, r *http.Request) {
- w.Header().Set("Content-Type", "application/json")
- fmt.Fprintf(w, `{"text": "Some text", "favorite_count": 24}`)
- })
-
- sling := New().Client(client)
- req, _ := http.NewRequest("GET", "http://example.com/success", nil)
-
- apiError := new(APIError)
- resp, err := sling.Do(req, nil, apiError)
-
- if err != nil {
- t.Errorf("expected nil, got %v", err)
- }
- if resp.StatusCode != 200 {
- t.Errorf("expected %d, got %d", 200, resp.StatusCode)
- }
- expected := &APIError{}
- if !reflect.DeepEqual(expected, apiError) {
- t.Errorf("failureV should not be populated, exepcted %v, got %v", expected, apiError)
- }
-}
-
-func TestDo_onFailure(t *testing.T) {
- const expectedMessage = "Invalid argument"
- const expectedCode int = 215
-
- client, mux, server := testServer()
- defer server.Close()
- mux.HandleFunc("/failure", func(w http.ResponseWriter, r *http.Request) {
- w.Header().Set("Content-Type", "application/json")
- w.WriteHeader(400)
- fmt.Fprintf(w, `{"message": "Invalid argument", "code": 215}`)
- })
-
- sling := New().Client(client)
- req, _ := http.NewRequest("GET", "http://example.com/failure", nil)
-
- model := new(FakeModel)
- apiError := new(APIError)
- resp, err := sling.Do(req, model, apiError)
-
- if err != nil {
- t.Errorf("expected nil, got %v", err)
- }
- if resp.StatusCode != 400 {
- t.Errorf("expected %d, got %d", 400, resp.StatusCode)
- }
- if apiError.Message != expectedMessage {
- t.Errorf("expected %s, got %s", expectedMessage, apiError.Message)
- }
- if apiError.Code != expectedCode {
- t.Errorf("expected %d, got %d", expectedCode, apiError.Code)
- }
-}
-
-func TestDo_onFailureWithNilValue(t *testing.T) {
- client, mux, server := testServer()
- defer server.Close()
- mux.HandleFunc("/failure", func(w http.ResponseWriter, r *http.Request) {
- w.Header().Set("Content-Type", "application/json")
- w.WriteHeader(420)
- fmt.Fprintf(w, `{"message": "Enhance your calm", "code": 88}`)
- })
-
- sling := New().Client(client)
- req, _ := http.NewRequest("GET", "http://example.com/failure", nil)
-
- model := new(FakeModel)
- resp, err := sling.Do(req, model, nil)
-
- if err != nil {
- t.Errorf("expected nil, got %v", err)
- }
- if resp.StatusCode != 420 {
- t.Errorf("expected %d, got %d", 420, resp.StatusCode)
- }
- expected := &FakeModel{}
- if !reflect.DeepEqual(expected, model) {
- t.Errorf("successV should not be populated, exepcted %v, got %v", expected, model)
- }
-}
-
-func TestDo_skipDecodingIfContentTypeWrong(t *testing.T) {
- client, mux, server := testServer()
- defer server.Close()
- mux.HandleFunc("/success", func(w http.ResponseWriter, r *http.Request) {
- w.Header().Set("Content-Type", "text/html")
- fmt.Fprintf(w, `{"text": "Some text", "favorite_count": 24}`)
- })
-
- sling := New().Client(client)
- req, _ := http.NewRequest("GET", "http://example.com/success", nil)
-
- model := new(FakeModel)
- sling.Do(req, model, nil)
-
- expectedModel := &FakeModel{}
- if !reflect.DeepEqual(expectedModel, model) {
- t.Errorf("decoding should have been skipped, Content-Type was incorrect")
- }
-}
-
-func TestReceive_success(t *testing.T) {
- client, mux, server := testServer()
- defer server.Close()
- mux.HandleFunc("/foo/submit", func(w http.ResponseWriter, r *http.Request) {
- assertMethod(t, "POST", r)
- assertQuery(t, map[string]string{"kind_name": "vanilla", "count": "11"}, r)
- assertPostForm(t, map[string]string{"kind_name": "vanilla", "count": "11"}, r)
- w.Header().Set("Content-Type", "application/json")
- fmt.Fprintf(w, `{"text": "Some text", "favorite_count": 24}`)
- })
-
- endpoint := New().Client(client).Base("http://example.com/").Path("foo/").Post("submit")
- // encode url-tagged struct in query params and as post body for testing purposes
- params := FakeParams{KindName: "vanilla", Count: 11}
- model := new(FakeModel)
- apiError := new(APIError)
- resp, err := endpoint.New().QueryStruct(params).BodyForm(params).Receive(model, apiError)
-
- if err != nil {
- t.Errorf("expected nil, got %v", err)
- }
- if resp.StatusCode != 200 {
- t.Errorf("expected %d, got %d", 200, resp.StatusCode)
- }
- expectedModel := &FakeModel{Text: "Some text", FavoriteCount: 24}
- if !reflect.DeepEqual(expectedModel, model) {
- t.Errorf("expected %v, got %v", expectedModel, model)
- }
- expectedAPIError := &APIError{}
- if !reflect.DeepEqual(expectedAPIError, apiError) {
- t.Errorf("failureV should be zero valued, exepcted %v, got %v", expectedAPIError, apiError)
- }
-}
-
-func TestReceive_failure(t *testing.T) {
- client, mux, server := testServer()
- defer server.Close()
- mux.HandleFunc("/foo/submit", func(w http.ResponseWriter, r *http.Request) {
- assertMethod(t, "POST", r)
- assertQuery(t, map[string]string{"kind_name": "vanilla", "count": "11"}, r)
- assertPostForm(t, map[string]string{"kind_name": "vanilla", "count": "11"}, r)
- w.Header().Set("Content-Type", "application/json")
- w.WriteHeader(429)
- fmt.Fprintf(w, `{"message": "Rate limit exceeded", "code": 88}`)
- })
-
- endpoint := New().Client(client).Base("http://example.com/").Path("foo/").Post("submit")
- // encode url-tagged struct in query params and as post body for testing purposes
- params := FakeParams{KindName: "vanilla", Count: 11}
- model := new(FakeModel)
- apiError := new(APIError)
- resp, err := endpoint.New().QueryStruct(params).BodyForm(params).Receive(model, apiError)
-
- if err != nil {
- t.Errorf("expected nil, got %v", err)
- }
- if resp.StatusCode != 429 {
- t.Errorf("expected %d, got %d", 429, resp.StatusCode)
- }
- expectedAPIError := &APIError{Message: "Rate limit exceeded", Code: 88}
- if !reflect.DeepEqual(expectedAPIError, apiError) {
- t.Errorf("expected %v, got %v", expectedAPIError, apiError)
- }
- expectedModel := &FakeModel{}
- if !reflect.DeepEqual(expectedModel, model) {
- t.Errorf("successV should not be zero valued, expected %v, got %v", expectedModel, model)
- }
-}
-
-func TestReceive_errorCreatingRequest(t *testing.T) {
- expectedErr := errors.New("json: unsupported value: +Inf")
- resp, err := New().BodyJSON(FakeModel{Temperature: math.Inf(1)}).Receive(nil, nil)
- if err == nil || err.Error() != expectedErr.Error() {
- t.Errorf("expected %v, got %v", expectedErr, err)
- }
- if resp != nil {
- t.Errorf("expected nil resp, got %v", resp)
- }
-}
-
-// Testing Utils
-
-// testServer returns an http Client, ServeMux, and Server. The client proxies
-// requests to the server and handlers can be registered on the mux to handle
-// requests. The caller must close the test server.
-func testServer() (*http.Client, *http.ServeMux, *httptest.Server) {
- mux := http.NewServeMux()
- server := httptest.NewServer(mux)
- transport := &http.Transport{
- Proxy: func(req *http.Request) (*url.URL, error) {
- return url.Parse(server.URL)
- },
- }
- client := &http.Client{Transport: transport}
- return client, mux, server
-}
-
-func assertMethod(t *testing.T, expectedMethod string, req *http.Request) {
- if actualMethod := req.Method; actualMethod != expectedMethod {
- t.Errorf("expected method %s, got %s", expectedMethod, actualMethod)
- }
-}
-
-// assertQuery tests that the Request has the expected url query key/val pairs
-func assertQuery(t *testing.T, expected map[string]string, req *http.Request) {
- queryValues := req.URL.Query() // net/url Values is a map[string][]string
- expectedValues := url.Values{}
- for key, value := range expected {
- expectedValues.Add(key, value)
- }
- if !reflect.DeepEqual(expectedValues, queryValues) {
- t.Errorf("expected parameters %v, got %v", expected, req.URL.RawQuery)
- }
-}
-
-// assertPostForm tests that the Request has the expected key values pairs url
-// encoded in its Body
-func assertPostForm(t *testing.T, expected map[string]string, req *http.Request) {
- req.ParseForm() // parses request Body to put url.Values in r.Form/r.PostForm
- expectedValues := url.Values{}
- for key, value := range expected {
- expectedValues.Add(key, value)
- }
- if !reflect.DeepEqual(expectedValues, req.PostForm) {
- t.Errorf("expected parameters %v, got %v", expected, req.PostForm)
- }
-}
diff --git a/vendor/src/github.com/die-net/lrucache/LICENSE b/vendor/src/github.com/die-net/lrucache/LICENSE
deleted file mode 100644
index 8dada3e..0000000
--- a/vendor/src/github.com/die-net/lrucache/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "{}"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright {yyyy} {name of copyright owner}
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/src/github.com/die-net/lrucache/README.md b/vendor/src/github.com/die-net/lrucache/README.md
deleted file mode 100644
index 59ee253..0000000
--- a/vendor/src/github.com/die-net/lrucache/README.md
+++ /dev/null
@@ -1,19 +0,0 @@
-LruCache [![Build Status](https://travis-ci.org/die-net/lrucache.svg?branch=master)](https://travis-ci.org/die-net/lrucache) [![Coverage Status](https://coveralls.io/repos/github/die-net/lrucache/badge.svg?branch=master)](https://coveralls.io/github/die-net/lrucache?branch=master)
-========
-
-LruCache is a thread-safe, in-memory [httpcache.Cache](https://github.com/gregjones/httpcache) implementation that evicts the least recently used entries when a byte size limit or optional max age would be exceeded.
-
-Using the included [TwoTier](https://github.com/die-net/lrucache/tree/master/twotier) wrapper, it could also be used as a small and fast cache for popular objects, falling back to a larger and slower cache (such as [s3cache](https://github.com/sourcegraph/s3cache)) for less popular ones.
-
-Also see the godoc API documentation for [LruCache](https://godoc.org/github.com/die-net/lrucache) or [TwoTier](https://godoc.org/github.com/die-net/lrucache/twotier).
-
-Included are a test suite with close to 100% test coverage and a parallel benchmark suite that shows individual Set, Get, and Delete operations take under 400ns to complete.
-
-License
--------
-
-Copyright 2016 Aaron Hopkins and contributors
-
-Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
diff --git a/vendor/src/github.com/die-net/lrucache/lrucache.go b/vendor/src/github.com/die-net/lrucache/lrucache.go
deleted file mode 100644
index 0c3f6b8..0000000
--- a/vendor/src/github.com/die-net/lrucache/lrucache.go
+++ /dev/null
@@ -1,146 +0,0 @@
-// Package lrucache provides a byte-size-limited implementation of
-// httpcache.Cache that stores data in memory.
-package lrucache
-
-import (
- "container/list"
- "sync"
- "time"
-)
-
-// LruCache is a thread-safe, in-memory httpcache.Cache that evicts the
-// least recently used entries from memory when either MaxSize (in bytes)
-// limit would be exceeded or (if set) the entries are older than MaxAge (in
-// seconds). Use the New constructor to create one.
-type LruCache struct {
- MaxSize int64
- MaxAge int64
-
- mu sync.Mutex
- cache map[string]*list.Element
- lru *list.List // Front is least-recent
- size int64
-}
-
-// New creates an LruCache that will restrict itself to maxSize bytes of
-// memory. If maxAge > 0, entries will also be expired after maxAge
-// seconds.
-func New(maxSize int64, maxAge int64) *LruCache {
- c := &LruCache{
- MaxSize: maxSize,
- MaxAge: maxAge,
- lru: list.New(),
- cache: make(map[string]*list.Element),
- }
-
- return c
-}
-
-// Get returns the []byte representation of a cached response and a bool
-// set to true if the key was found.
-func (c *LruCache) Get(key string) ([]byte, bool) {
- c.mu.Lock()
-
- le, ok := c.cache[key]
- if !ok {
- c.mu.Unlock() // Avoiding defer overhead
- return nil, false
- }
-
- if c.MaxAge > 0 && le.Value.(*entry).expires <= time.Now().Unix() {
- c.deleteElement(le)
- c.maybeDeleteOldest()
-
- c.mu.Unlock() // Avoiding defer overhead
- return nil, false
- }
-
- c.lru.MoveToBack(le)
- value := le.Value.(*entry).value
-
- c.mu.Unlock() // Avoiding defer overhead
- return value, true
-}
-
-// Set stores the []byte representation of a response for a given key.
-func (c *LruCache) Set(key string, value []byte) {
- c.mu.Lock()
-
- expires := int64(0)
- if c.MaxAge > 0 {
- expires = time.Now().Unix() + c.MaxAge
- }
-
- if le, ok := c.cache[key]; ok {
- c.lru.MoveToBack(le)
- e := le.Value.(*entry)
- c.size += int64(len(value)) - int64(len(e.value))
- e.value = value
- e.expires = expires
- } else {
- e := &entry{key: key, value: value, expires: expires}
- c.cache[key] = c.lru.PushBack(e)
- c.size += e.size()
- }
-
- c.maybeDeleteOldest()
-
- c.mu.Unlock()
-}
-
-// Delete removes the value associated with a key.
-func (c *LruCache) Delete(key string) {
- c.mu.Lock()
-
- if le, ok := c.cache[key]; ok {
- c.deleteElement(le)
- }
-
- c.mu.Unlock()
-}
-
-// Size returns the estimated current memory usage of LruCache.
-func (c *LruCache) Size() int64 {
- c.mu.Lock()
- size := c.size
- c.mu.Unlock()
-
- return size
-}
-
-func (c *LruCache) maybeDeleteOldest() {
- for c.size > c.MaxSize {
- le := c.lru.Front()
- if le == nil {
- panic("LruCache: non-zero size but empty lru")
- }
- c.deleteElement(le)
- }
-
- if c.MaxAge > 0 {
- now := time.Now().Unix()
- for le := c.lru.Front(); le != nil && le.Value.(*entry).expires <= now; le = c.lru.Front() {
- c.deleteElement(le)
- }
- }
-}
-
-func (c *LruCache) deleteElement(le *list.Element) {
- c.lru.Remove(le)
- e := le.Value.(*entry)
- delete(c.cache, e.key)
- c.size -= e.size()
-}
-
-// Rough estimate of map + entry object + string + byte slice overheads in bytes.
-const entryOverhead = 168
-
-type entry struct {
- key string
- value []byte
- expires int64
-}
-
-func (e *entry) size() int64 {
- return entryOverhead + int64(len(e.key)) + int64(len(e.value))
-}
diff --git a/vendor/src/github.com/die-net/lrucache/lrucache_test.go b/vendor/src/github.com/die-net/lrucache/lrucache_test.go
deleted file mode 100644
index dc42a23..0000000
--- a/vendor/src/github.com/die-net/lrucache/lrucache_test.go
+++ /dev/null
@@ -1,235 +0,0 @@
-package lrucache
-
-import (
- "github.com/gregjones/httpcache"
- "github.com/stretchr/testify/assert"
- "math/rand"
- "runtime"
- "strconv"
- "testing"
- "time"
-)
-
-var entries = []struct {
- key string
- value string
-}{
- {"1", "one"},
- {"2", "two"},
- {"3", "three"},
- {"4", "four"},
- {"5", "five"},
-}
-
-func TestInterface(t *testing.T) {
- var h httpcache.Cache
- h = New(1000000, 0)
- if assert.NotNil(t, h) {
- _, ok := h.Get("missing")
- assert.False(t, ok)
- }
-}
-
-func TestCache(t *testing.T) {
- c := New(1000000, 0)
-
- for _, e := range entries {
- c.Set(e.key, []byte(e.value))
- }
-
- c.Delete("missing")
- _, ok := c.Get("missing")
- assert.False(t, ok)
-
- for _, e := range entries {
- value, ok := c.Get(e.key)
- if assert.True(t, ok) {
- assert.Equal(t, string(e.value), string(value))
- }
- }
-
- for _, e := range entries {
- c.Delete(e.key)
-
- _, ok := c.Get(e.key)
- assert.False(t, ok)
- }
-}
-
-func TestSize(t *testing.T) {
- c := New(1000000, 0)
- assert.Equal(t, int64(0), c.size)
-
- // Check that size is overhead + len(key) + len(value)
- c.Set("some", []byte("text"))
- assert.Equal(t, int64(entryOverhead+8), c.size)
-
- // Replace key
- c.Set("some", []byte("longer text"))
- assert.Equal(t, int64(entryOverhead+15), c.size)
-
- assert.Equal(t, c.size, c.Size())
-
- c.Delete("some")
- assert.Equal(t, int64(0), c.size)
-}
-
-func TestMaxSize(t *testing.T) {
- c := New(entryOverhead*2+20, 0)
-
- for _, e := range entries {
- c.Set(e.key, []byte(e.value))
- }
-
- // Make sure only the last two entries were kept.
- assert.Equal(t, int64(entryOverhead*2+10), c.size)
-}
-
-func TestMaxAge(t *testing.T) {
- c := New(1000000, 86400)
-
- now := time.Now().Unix()
- expected := now + 86400
-
- // Add one expired entry
- c.Set("foo", []byte("bar"))
- c.lru.Back().Value.(*entry).expires = now
-
- // Set a few and verify expiration times
- for _, s := range entries {
- c.Set(s.key, []byte(s.value))
- e := c.lru.Back().Value.(*entry)
- assert.True(t, e.expires >= expected && e.expires <= expected+10)
- }
-
- // Make sure we can get them all
- for _, s := range entries {
- _, ok := c.Get(s.key)
- assert.True(t, ok)
- }
-
- // Make sure only non-expired entries are still in the cache
- assert.Equal(t, int64(entryOverhead*5+24), c.size)
-
- // Expire all entries
- for _, s := range entries {
- le, ok := c.cache[s.key]
- if assert.True(t, ok) {
- le.Value.(*entry).expires = now
- }
- }
-
- // Get one expired entry, which should clear all expired entries
- _, ok := c.Get("3")
- assert.False(t, ok)
- assert.Equal(t, int64(0), c.size)
-}
-
-func TestRace(t *testing.T) {
- c := New(100000, 0)
-
- for worker := 0; worker < 8; worker++ {
- go testRaceWorker(c)
- }
-}
-
-func testRaceWorker(c *LruCache) {
- v := []byte("value")
-
- for n := 0; n < 1000; n++ {
- c.Set(randKey(100), v)
- _, _ = c.Get(randKey(200))
- c.Delete(randKey(100))
- _ = c.Size()
- }
-}
-
-func TestOverhead(t *testing.T) {
- if testing.Short() || !testing.Verbose() {
- t.SkipNow()
- }
-
- num := 1000000
- c := New(int64(num)*1000, 0)
-
- mem := readMem()
-
- for n := 0; n < num; n++ {
- c.Set(strconv.Itoa(n), []byte(randKey(1000000000)))
- }
-
- mem = readMem() - mem
- stored := c.Size() - int64(num)*entryOverhead
- t.Log("entryOverhead =", (int64(mem)-stored)/int64(num))
-}
-
-func BenchmarkSet(b *testing.B) {
- v := []byte("value")
-
- c := benchSetup(b, 10000000, 10000)
-
- b.RunParallel(func(pb *testing.PB) {
- for pb.Next() {
- c.Set(randKey(10000), v)
- }
- })
-}
-
-func BenchmarkGet(b *testing.B) {
- c := benchSetup(b, 10000000, 10000)
-
- b.RunParallel(func(pb *testing.PB) {
- for pb.Next() {
- _, _ = c.Get(randKey(20000))
- }
- })
-}
-
-func BenchmarkSize(b *testing.B) {
- c := benchSetup(b, 10000000, 10000)
-
- b.RunParallel(func(pb *testing.PB) {
- for pb.Next() {
- _ = c.Size()
- }
- })
-}
-
-func BenchmarkSetGetDeleteSize(b *testing.B) {
- v := []byte("value")
-
- c := benchSetup(b, 10000000, 10000)
-
- b.RunParallel(func(pb *testing.PB) {
- for pb.Next() {
- c.Set(randKey(10000), v)
- _, _ = c.Get(randKey(20000))
- c.Delete(randKey(10000))
- _ = c.Size()
- }
- })
-}
-
-func benchSetup(b *testing.B, size int64, entries int) *LruCache {
- c := New(size, 0)
-
- v := []byte("value")
- for i := 0; i < entries; i++ {
- c.Set(strconv.Itoa(i), v)
- }
-
- b.ResetTimer()
-
- return c
-}
-
-func randKey(n int32) string {
- return strconv.Itoa(int(rand.Int31n(n)))
-}
-
-func readMem() int64 {
- m := runtime.MemStats{}
- runtime.GC()
- runtime.ReadMemStats(&m)
- return int64(m.Alloc)
-}
diff --git a/vendor/src/github.com/die-net/lrucache/twotier/README.md b/vendor/src/github.com/die-net/lrucache/twotier/README.md
deleted file mode 100644
index ec390a7..0000000
--- a/vendor/src/github.com/die-net/lrucache/twotier/README.md
+++ /dev/null
@@ -1,21 +0,0 @@
-TwoTier [![Build Status](https://travis-ci.org/die-net/lrucache.svg?branch=master)](https://travis-ci.org/die-net/lrucache)
-========
-
-TwoTier is an [httpcache.Cache](https://github.com/gregjones/httpcache) implementation that wraps two other httpcache.Cache instances,
-allowing you to use both a small and fast cache (such as an in-memory [LruCache](https://github.com/die-net/lrucache) or [memcache](https://github.com/gregjones/httpcache/tree/master/memcache)) for popular objects and
-fall back to a larger and slower cache (such as [s3cache](https://github.com/sourcegraph/s3cache)) for less popular ones.
-
-While TwoTier passes Set and Delete operations to both tiers, it can't make strong guarantees that the contents of both caches will always remain in sync. If you are caching URLs that don't change often or don't mind that you sometimes get different versions of the same URL's contents, this is probably fine. When using LruCache as the first-tier cache, you can limit how long it can disagree with the second-tier cache by setting its MaxAge parameter to the maximum time you are comfortable with them disagreeing.
-
-See the godoc API documentation for [TwoTier](https://godoc.org/github.com/die-net/lrucache/twotier) or [LruCache](https://godoc.org/github.com/die-net/lrucache).
-
-There is a test-suite included that has close to 100% test coverage on TwoTier's relatively simple functionality.
-
-License
--------
-
-Copyright 2016 Aaron Hopkins and contributors
-
-Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
diff --git a/vendor/src/github.com/die-net/lrucache/twotier/twotier.go b/vendor/src/github.com/die-net/lrucache/twotier/twotier.go
deleted file mode 100644
index d8780b0..0000000
--- a/vendor/src/github.com/die-net/lrucache/twotier/twotier.go
+++ /dev/null
@@ -1,57 +0,0 @@
-// Package twotier provides a wrapper for two httpcache.Cache instances,
-// allowing you to use both a small and fast cache for popular objects and
-// fall back to a larger and slower cache for less popular ones.
-package twotier
-
-import (
- "github.com/gregjones/httpcache"
-)
-
-// TwoTier creates a two-tiered cache out of two httpcache.Cache instances.
-// Reads are favored from first, and writes affect both first and second.
-type TwoTier struct {
- first httpcache.Cache
- second httpcache.Cache
-}
-
-// New creates a TwoTier. Both first and second must be non-nil.
-func New(first httpcache.Cache, second httpcache.Cache) *TwoTier {
- if first == nil || second == nil || first == second {
- return nil
- }
- return &TwoTier{first: first, second: second}
-}
-
-// Get returns the []byte representation of a cached response and a bool set
-// to true if the key was found. It tries the first tier cache, and if
-// that's not successful, copies the result from the second tier into the
-// first tier.
-func (c *TwoTier) Get(key string) ([]byte, bool) {
- if value, ok := c.first.Get(key); ok {
- return value, true
- }
-
- value, ok := c.second.Get(key)
- if !ok {
- return nil, false
- }
-
- c.first.Set(key, value)
-
- return value, true
-}
-
-// Set stores the []byte representation of a response for a given key into
-// the second tier cache, and deletes the cache entry from the first tier
-// cache.
-func (c *TwoTier) Set(key string, value []byte) {
- c.second.Set(key, value)
- c.first.Delete(key)
-}
-
-// Delete removes the value associated with a key from both the first and
-// second tier caches.
-func (c *TwoTier) Delete(key string) {
- c.second.Delete(key)
- c.first.Delete(key)
-}
diff --git a/vendor/src/github.com/die-net/lrucache/twotier/twotier_test.go b/vendor/src/github.com/die-net/lrucache/twotier/twotier_test.go
deleted file mode 100644
index 49189f2..0000000
--- a/vendor/src/github.com/die-net/lrucache/twotier/twotier_test.go
+++ /dev/null
@@ -1,85 +0,0 @@
-package twotier
-
-import (
- "github.com/die-net/lrucache"
- "github.com/gregjones/httpcache"
- "github.com/stretchr/testify/assert"
- "testing"
-)
-
-func TestInterface(t *testing.T) {
- var h httpcache.Cache
- h = twoNew(1000000, 1000000)
- if assert.NotNil(t, h) {
- _, ok := h.Get("missing")
- assert.Equal(t, ok, false)
- }
-}
-
-func TestNew(t *testing.T) {
- // Check New's validation.
- c := lrucache.New(1000000, 0)
- assert.Nil(t, New(nil, nil))
- assert.Nil(t, New(nil, c))
- assert.Nil(t, New(c, nil))
- assert.Nil(t, New(c, c))
-
- assert.NotNil(t, New(c, lrucache.New(1000000, 0)))
-}
-
-func TestGet(t *testing.T) {
- c := twoNew(1000000, 1000000)
-
- // Try a cache miss.
- _, ok := c.Get("foo")
- assert.Equal(t, ok, false)
-
- // Add something to secondary cache, and make sure we can see it.
- c.second.Set("foo", []byte("bar"))
- v, _ := c.Get("foo")
- assert.Equal(t, string(v), "bar")
-
- // And it should've been written to first.
- v, _ = c.first.Get("foo")
- assert.Equal(t, string(v), "bar")
-
- // Change secondary cache and we should still see old value.
- c.second.Set("foo", []byte("qux"))
- v, _ = c.Get("foo")
- assert.Equal(t, string(v), "bar")
-
- // Pretend first expired that value and we should see new value.
- c.first.Delete("foo")
- v, _ = c.Get("foo")
- assert.Equal(t, string(v), "qux")
-}
-
-func TestSet(t *testing.T) {
- c := twoNew(1000000, 1000000)
-
- // Check that Set correctly overwrites second and deletes first.
- c.first.Set("foo", []byte("bar"))
- c.second.Set("foo", []byte("baz"))
- c.Set("foo", []byte("qux"))
- _, ok := c.first.Get("foo")
- assert.Equal(t, ok, false)
- v, _ := c.second.Get("foo")
- assert.Equal(t, string(v), "qux")
-}
-
-func TestDelete(t *testing.T) {
- c := twoNew(1000000, 1000000)
-
- // Check that Delete correctly deletes first and second.
- c.first.Set("foo", []byte("bar"))
- c.second.Set("foo", []byte("baz"))
- c.Delete("foo")
- _, ok := c.first.Get("foo")
- assert.Equal(t, ok, false)
- _, ok = c.second.Get("foo")
- assert.Equal(t, ok, false)
-}
-
-func twoNew(firstSize, secondSize int64) *TwoTier {
- return New(lrucache.New(firstSize, 0), lrucache.New(secondSize, 0))
-}
diff --git a/vendor/src/github.com/garyburd/redigo/internal/commandinfo.go b/vendor/src/github.com/garyburd/redigo/internal/commandinfo.go
deleted file mode 100644
index 11e5842..0000000
--- a/vendor/src/github.com/garyburd/redigo/internal/commandinfo.go
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2014 Gary Burd
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package internal // import "github.com/garyburd/redigo/internal"
-
-import (
- "strings"
-)
-
-const (
- WatchState = 1 << iota
- MultiState
- SubscribeState
- MonitorState
-)
-
-type CommandInfo struct {
- Set, Clear int
-}
-
-var commandInfos = map[string]CommandInfo{
- "WATCH": {Set: WatchState},
- "UNWATCH": {Clear: WatchState},
- "MULTI": {Set: MultiState},
- "EXEC": {Clear: WatchState | MultiState},
- "DISCARD": {Clear: WatchState | MultiState},
- "PSUBSCRIBE": {Set: SubscribeState},
- "SUBSCRIBE": {Set: SubscribeState},
- "MONITOR": {Set: MonitorState},
-}
-
-func init() {
- for n, ci := range commandInfos {
- commandInfos[strings.ToLower(n)] = ci
- }
-}
-
-func LookupCommandInfo(commandName string) CommandInfo {
- if ci, ok := commandInfos[commandName]; ok {
- return ci
- }
- return commandInfos[strings.ToUpper(commandName)]
-}
diff --git a/vendor/src/github.com/garyburd/redigo/internal/commandinfo_test.go b/vendor/src/github.com/garyburd/redigo/internal/commandinfo_test.go
deleted file mode 100644
index 118e94b..0000000
--- a/vendor/src/github.com/garyburd/redigo/internal/commandinfo_test.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package internal
-
-import "testing"
-
-func TestLookupCommandInfo(t *testing.T) {
- for _, n := range []string{"watch", "WATCH", "wAtch"} {
- if LookupCommandInfo(n) == (CommandInfo{}) {
- t.Errorf("LookupCommandInfo(%q) = CommandInfo{}, expected non-zero value", n)
- }
- }
-}
-
-func benchmarkLookupCommandInfo(b *testing.B, names ...string) {
- for i := 0; i < b.N; i++ {
- for _, c := range names {
- LookupCommandInfo(c)
- }
- }
-}
-
-func BenchmarkLookupCommandInfoCorrectCase(b *testing.B) {
- benchmarkLookupCommandInfo(b, "watch", "WATCH", "monitor", "MONITOR")
-}
-
-func BenchmarkLookupCommandInfoMixedCase(b *testing.B) {
- benchmarkLookupCommandInfo(b, "wAtch", "WeTCH", "monItor", "MONiTOR")
-}
diff --git a/vendor/src/github.com/garyburd/redigo/internal/redistest/testdb.go b/vendor/src/github.com/garyburd/redigo/internal/redistest/testdb.go
deleted file mode 100644
index b6f205b..0000000
--- a/vendor/src/github.com/garyburd/redigo/internal/redistest/testdb.go
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2014 Gary Burd
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-// Package redistest contains utilities for writing Redigo tests.
-package redistest
-
-import (
- "errors"
- "time"
-
- "github.com/garyburd/redigo/redis"
-)
-
-type testConn struct {
- redis.Conn
-}
-
-func (t testConn) Close() error {
- _, err := t.Conn.Do("SELECT", "9")
- if err != nil {
- return nil
- }
- _, err = t.Conn.Do("FLUSHDB")
- if err != nil {
- return err
- }
- return t.Conn.Close()
-}
-
-// Dial dials the local Redis server and selects database 9. To prevent
-// stomping on real data, DialTestDB fails if database 9 contains data. The
-// returned connection flushes database 9 on close.
-func Dial() (redis.Conn, error) {
- c, err := redis.DialTimeout("tcp", ":6379", 0, 1*time.Second, 1*time.Second)
- if err != nil {
- return nil, err
- }
-
- _, err = c.Do("SELECT", "9")
- if err != nil {
- c.Close()
- return nil, err
- }
-
- n, err := redis.Int(c.Do("DBSIZE"))
- if err != nil {
- c.Close()
- return nil, err
- }
-
- if n != 0 {
- c.Close()
- return nil, errors.New("database #9 is not empty, test can not continue")
- }
-
- return testConn{c}, nil
-}
diff --git a/vendor/src/github.com/garyburd/redigo/redis/conn.go b/vendor/src/github.com/garyburd/redigo/redis/conn.go
deleted file mode 100644
index ed358c6..0000000
--- a/vendor/src/github.com/garyburd/redigo/redis/conn.go
+++ /dev/null
@@ -1,570 +0,0 @@
-// Copyright 2012 Gary Burd
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package redis
-
-import (
- "bufio"
- "bytes"
- "errors"
- "fmt"
- "io"
- "net"
- "net/url"
- "regexp"
- "strconv"
- "sync"
- "time"
-)
-
-// conn is the low-level implementation of Conn
-type conn struct {
-
- // Shared
- mu sync.Mutex
- pending int
- err error
- conn net.Conn
-
- // Read
- readTimeout time.Duration
- br *bufio.Reader
-
- // Write
- writeTimeout time.Duration
- bw *bufio.Writer
-
- // Scratch space for formatting argument length.
- // '*' or '$', length, "\r\n"
- lenScratch [32]byte
-
- // Scratch space for formatting integers and floats.
- numScratch [40]byte
-}
-
-// DialTimeout acts like Dial but takes timeouts for establishing the
-// connection to the server, writing a command and reading a reply.
-//
-// Deprecated: Use Dial with options instead.
-func DialTimeout(network, address string, connectTimeout, readTimeout, writeTimeout time.Duration) (Conn, error) {
- return Dial(network, address,
- DialConnectTimeout(connectTimeout),
- DialReadTimeout(readTimeout),
- DialWriteTimeout(writeTimeout))
-}
-
-// DialOption specifies an option for dialing a Redis server.
-type DialOption struct {
- f func(*dialOptions)
-}
-
-type dialOptions struct {
- readTimeout time.Duration
- writeTimeout time.Duration
- dial func(network, addr string) (net.Conn, error)
- db int
- password string
-}
-
-// DialReadTimeout specifies the timeout for reading a single command reply.
-func DialReadTimeout(d time.Duration) DialOption {
- return DialOption{func(do *dialOptions) {
- do.readTimeout = d
- }}
-}
-
-// DialWriteTimeout specifies the timeout for writing a single command.
-func DialWriteTimeout(d time.Duration) DialOption {
- return DialOption{func(do *dialOptions) {
- do.writeTimeout = d
- }}
-}
-
-// DialConnectTimeout specifies the timeout for connecting to the Redis server.
-func DialConnectTimeout(d time.Duration) DialOption {
- return DialOption{func(do *dialOptions) {
- dialer := net.Dialer{Timeout: d}
- do.dial = dialer.Dial
- }}
-}
-
-// DialNetDial specifies a custom dial function for creating TCP
-// connections. If this option is left out, then net.Dial is
-// used. DialNetDial overrides DialConnectTimeout.
-func DialNetDial(dial func(network, addr string) (net.Conn, error)) DialOption {
- return DialOption{func(do *dialOptions) {
- do.dial = dial
- }}
-}
-
-// DialDatabase specifies the database to select when dialing a connection.
-func DialDatabase(db int) DialOption {
- return DialOption{func(do *dialOptions) {
- do.db = db
- }}
-}
-
-// DialPassword specifies the password to use when connecting to
-// the Redis server.
-func DialPassword(password string) DialOption {
- return DialOption{func(do *dialOptions) {
- do.password = password
- }}
-}
-
-// Dial connects to the Redis server at the given network and
-// address using the specified options.
-func Dial(network, address string, options ...DialOption) (Conn, error) {
- do := dialOptions{
- dial: net.Dial,
- }
- for _, option := range options {
- option.f(&do)
- }
-
- netConn, err := do.dial(network, address)
- if err != nil {
- return nil, err
- }
- c := &conn{
- conn: netConn,
- bw: bufio.NewWriter(netConn),
- br: bufio.NewReader(netConn),
- readTimeout: do.readTimeout,
- writeTimeout: do.writeTimeout,
- }
-
- if do.password != "" {
- if _, err := c.Do("AUTH", do.password); err != nil {
- netConn.Close()
- return nil, err
- }
- }
-
- if do.db != 0 {
- if _, err := c.Do("SELECT", do.db); err != nil {
- netConn.Close()
- return nil, err
- }
- }
-
- return c, nil
-}
-
-var pathDBRegexp = regexp.MustCompile(`/(\d*)\z`)
-
-// DialURL connects to a Redis server at the given URL using the Redis
-// URI scheme. URLs should follow the draft IANA specification for the
-// scheme (https://www.iana.org/assignments/uri-schemes/prov/redis).
-func DialURL(rawurl string, options ...DialOption) (Conn, error) {
- u, err := url.Parse(rawurl)
- if err != nil {
- return nil, err
- }
-
- if u.Scheme != "redis" {
- return nil, fmt.Errorf("invalid redis URL scheme: %s", u.Scheme)
- }
-
- // As per the IANA draft spec, the host defaults to localhost and
- // the port defaults to 6379.
- host, port, err := net.SplitHostPort(u.Host)
- if err != nil {
- // assume port is missing
- host = u.Host
- port = "6379"
- }
- if host == "" {
- host = "localhost"
- }
- address := net.JoinHostPort(host, port)
-
- if u.User != nil {
- password, isSet := u.User.Password()
- if isSet {
- options = append(options, DialPassword(password))
- }
- }
-
- match := pathDBRegexp.FindStringSubmatch(u.Path)
- if len(match) == 2 {
- db := 0
- if len(match[1]) > 0 {
- db, err = strconv.Atoi(match[1])
- if err != nil {
- return nil, fmt.Errorf("invalid database: %s", u.Path[1:])
- }
- }
- if db != 0 {
- options = append(options, DialDatabase(db))
- }
- } else if u.Path != "" {
- return nil, fmt.Errorf("invalid database: %s", u.Path[1:])
- }
-
- return Dial("tcp", address, options...)
-}
-
-// NewConn returns a new Redigo connection for the given net connection.
-func NewConn(netConn net.Conn, readTimeout, writeTimeout time.Duration) Conn {
- return &conn{
- conn: netConn,
- bw: bufio.NewWriter(netConn),
- br: bufio.NewReader(netConn),
- readTimeout: readTimeout,
- writeTimeout: writeTimeout,
- }
-}
-
-func (c *conn) Close() error {
- c.mu.Lock()
- err := c.err
- if c.err == nil {
- c.err = errors.New("redigo: closed")
- err = c.conn.Close()
- }
- c.mu.Unlock()
- return err
-}
-
-func (c *conn) fatal(err error) error {
- c.mu.Lock()
- if c.err == nil {
- c.err = err
- // Close connection to force errors on subsequent calls and to unblock
- // other reader or writer.
- c.conn.Close()
- }
- c.mu.Unlock()
- return err
-}
-
-func (c *conn) Err() error {
- c.mu.Lock()
- err := c.err
- c.mu.Unlock()
- return err
-}
-
-func (c *conn) writeLen(prefix byte, n int) error {
- c.lenScratch[len(c.lenScratch)-1] = '\n'
- c.lenScratch[len(c.lenScratch)-2] = '\r'
- i := len(c.lenScratch) - 3
- for {
- c.lenScratch[i] = byte('0' + n%10)
- i -= 1
- n = n / 10
- if n == 0 {
- break
- }
- }
- c.lenScratch[i] = prefix
- _, err := c.bw.Write(c.lenScratch[i:])
- return err
-}
-
-func (c *conn) writeString(s string) error {
- c.writeLen('$', len(s))
- c.bw.WriteString(s)
- _, err := c.bw.WriteString("\r\n")
- return err
-}
-
-func (c *conn) writeBytes(p []byte) error {
- c.writeLen('$', len(p))
- c.bw.Write(p)
- _, err := c.bw.WriteString("\r\n")
- return err
-}
-
-func (c *conn) writeInt64(n int64) error {
- return c.writeBytes(strconv.AppendInt(c.numScratch[:0], n, 10))
-}
-
-func (c *conn) writeFloat64(n float64) error {
- return c.writeBytes(strconv.AppendFloat(c.numScratch[:0], n, 'g', -1, 64))
-}
-
-func (c *conn) writeCommand(cmd string, args []interface{}) (err error) {
- c.writeLen('*', 1+len(args))
- err = c.writeString(cmd)
- for _, arg := range args {
- if err != nil {
- break
- }
- switch arg := arg.(type) {
- case string:
- err = c.writeString(arg)
- case []byte:
- err = c.writeBytes(arg)
- case int:
- err = c.writeInt64(int64(arg))
- case int64:
- err = c.writeInt64(arg)
- case float64:
- err = c.writeFloat64(arg)
- case bool:
- if arg {
- err = c.writeString("1")
- } else {
- err = c.writeString("0")
- }
- case nil:
- err = c.writeString("")
- default:
- var buf bytes.Buffer
- fmt.Fprint(&buf, arg)
- err = c.writeBytes(buf.Bytes())
- }
- }
- return err
-}
-
-type protocolError string
-
-func (pe protocolError) Error() string {
- return fmt.Sprintf("redigo: %s (possible server error or unsupported concurrent read by application)", string(pe))
-}
-
-func (c *conn) readLine() ([]byte, error) {
- p, err := c.br.ReadSlice('\n')
- if err == bufio.ErrBufferFull {
- return nil, protocolError("long response line")
- }
- if err != nil {
- return nil, err
- }
- i := len(p) - 2
- if i < 0 || p[i] != '\r' {
- return nil, protocolError("bad response line terminator")
- }
- return p[:i], nil
-}
-
-// parseLen parses bulk string and array lengths.
-func parseLen(p []byte) (int, error) {
- if len(p) == 0 {
- return -1, protocolError("malformed length")
- }
-
- if p[0] == '-' && len(p) == 2 && p[1] == '1' {
- // handle $-1 and $-1 null replies.
- return -1, nil
- }
-
- var n int
- for _, b := range p {
- n *= 10
- if b < '0' || b > '9' {
- return -1, protocolError("illegal bytes in length")
- }
- n += int(b - '0')
- }
-
- return n, nil
-}
-
-// parseInt parses an integer reply.
-func parseInt(p []byte) (interface{}, error) {
- if len(p) == 0 {
- return 0, protocolError("malformed integer")
- }
-
- var negate bool
- if p[0] == '-' {
- negate = true
- p = p[1:]
- if len(p) == 0 {
- return 0, protocolError("malformed integer")
- }
- }
-
- var n int64
- for _, b := range p {
- n *= 10
- if b < '0' || b > '9' {
- return 0, protocolError("illegal bytes in length")
- }
- n += int64(b - '0')
- }
-
- if negate {
- n = -n
- }
- return n, nil
-}
-
-var (
- okReply interface{} = "OK"
- pongReply interface{} = "PONG"
-)
-
-func (c *conn) readReply() (interface{}, error) {
- line, err := c.readLine()
- if err != nil {
- return nil, err
- }
- if len(line) == 0 {
- return nil, protocolError("short response line")
- }
- switch line[0] {
- case '+':
- switch {
- case len(line) == 3 && line[1] == 'O' && line[2] == 'K':
- // Avoid allocation for frequent "+OK" response.
- return okReply, nil
- case len(line) == 5 && line[1] == 'P' && line[2] == 'O' && line[3] == 'N' && line[4] == 'G':
- // Avoid allocation in PING command benchmarks :)
- return pongReply, nil
- default:
- return string(line[1:]), nil
- }
- case '-':
- return Error(string(line[1:])), nil
- case ':':
- return parseInt(line[1:])
- case '$':
- n, err := parseLen(line[1:])
- if n < 0 || err != nil {
- return nil, err
- }
- p := make([]byte, n)
- _, err = io.ReadFull(c.br, p)
- if err != nil {
- return nil, err
- }
- if line, err := c.readLine(); err != nil {
- return nil, err
- } else if len(line) != 0 {
- return nil, protocolError("bad bulk string format")
- }
- return p, nil
- case '*':
- n, err := parseLen(line[1:])
- if n < 0 || err != nil {
- return nil, err
- }
- r := make([]interface{}, n)
- for i := range r {
- r[i], err = c.readReply()
- if err != nil {
- return nil, err
- }
- }
- return r, nil
- }
- return nil, protocolError("unexpected response line")
-}
-
-func (c *conn) Send(cmd string, args ...interface{}) error {
- c.mu.Lock()
- c.pending += 1
- c.mu.Unlock()
- if c.writeTimeout != 0 {
- c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout))
- }
- if err := c.writeCommand(cmd, args); err != nil {
- return c.fatal(err)
- }
- return nil
-}
-
-func (c *conn) Flush() error {
- if c.writeTimeout != 0 {
- c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout))
- }
- if err := c.bw.Flush(); err != nil {
- return c.fatal(err)
- }
- return nil
-}
-
-func (c *conn) Receive() (reply interface{}, err error) {
- if c.readTimeout != 0 {
- c.conn.SetReadDeadline(time.Now().Add(c.readTimeout))
- }
- if reply, err = c.readReply(); err != nil {
- return nil, c.fatal(err)
- }
- // When using pub/sub, the number of receives can be greater than the
- // number of sends. To enable normal use of the connection after
- // unsubscribing from all channels, we do not decrement pending to a
- // negative value.
- //
- // The pending field is decremented after the reply is read to handle the
- // case where Receive is called before Send.
- c.mu.Lock()
- if c.pending > 0 {
- c.pending -= 1
- }
- c.mu.Unlock()
- if err, ok := reply.(Error); ok {
- return nil, err
- }
- return
-}
-
-func (c *conn) Do(cmd string, args ...interface{}) (interface{}, error) {
- c.mu.Lock()
- pending := c.pending
- c.pending = 0
- c.mu.Unlock()
-
- if cmd == "" && pending == 0 {
- return nil, nil
- }
-
- if c.writeTimeout != 0 {
- c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout))
- }
-
- if cmd != "" {
- if err := c.writeCommand(cmd, args); err != nil {
- return nil, c.fatal(err)
- }
- }
-
- if err := c.bw.Flush(); err != nil {
- return nil, c.fatal(err)
- }
-
- if c.readTimeout != 0 {
- c.conn.SetReadDeadline(time.Now().Add(c.readTimeout))
- }
-
- if cmd == "" {
- reply := make([]interface{}, pending)
- for i := range reply {
- r, e := c.readReply()
- if e != nil {
- return nil, c.fatal(e)
- }
- reply[i] = r
- }
- return reply, nil
- }
-
- var err error
- var reply interface{}
- for i := 0; i <= pending; i++ {
- var e error
- if reply, e = c.readReply(); e != nil {
- return nil, c.fatal(e)
- }
- if e, ok := reply.(Error); ok && err == nil {
- err = e
- }
- }
- return reply, err
-}
diff --git a/vendor/src/github.com/garyburd/redigo/redis/conn_test.go b/vendor/src/github.com/garyburd/redigo/redis/conn_test.go
deleted file mode 100644
index 2ead633..0000000
--- a/vendor/src/github.com/garyburd/redigo/redis/conn_test.go
+++ /dev/null
@@ -1,670 +0,0 @@
-// Copyright 2012 Gary Burd
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package redis_test
-
-import (
- "bytes"
- "io"
- "math"
- "net"
- "os"
- "reflect"
- "strings"
- "testing"
- "time"
-
- "github.com/garyburd/redigo/redis"
-)
-
-type testConn struct {
- io.Reader
- io.Writer
-}
-
-func (*testConn) Close() error { return nil }
-func (*testConn) LocalAddr() net.Addr { return nil }
-func (*testConn) RemoteAddr() net.Addr { return nil }
-func (*testConn) SetDeadline(t time.Time) error { return nil }
-func (*testConn) SetReadDeadline(t time.Time) error { return nil }
-func (*testConn) SetWriteDeadline(t time.Time) error { return nil }
-
-func dialTestConn(r io.Reader, w io.Writer) redis.DialOption {
- return redis.DialNetDial(func(net, addr string) (net.Conn, error) {
- return &testConn{Reader: r, Writer: w}, nil
- })
-}
-
-var writeTests = []struct {
- args []interface{}
- expected string
-}{
- {
- []interface{}{"SET", "key", "value"},
- "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$5\r\nvalue\r\n",
- },
- {
- []interface{}{"SET", "key", "value"},
- "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$5\r\nvalue\r\n",
- },
- {
- []interface{}{"SET", "key", byte(100)},
- "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$3\r\n100\r\n",
- },
- {
- []interface{}{"SET", "key", 100},
- "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$3\r\n100\r\n",
- },
- {
- []interface{}{"SET", "key", int64(math.MinInt64)},
- "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$20\r\n-9223372036854775808\r\n",
- },
- {
- []interface{}{"SET", "key", float64(1349673917.939762)},
- "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$21\r\n1.349673917939762e+09\r\n",
- },
- {
- []interface{}{"SET", "key", ""},
- "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$0\r\n\r\n",
- },
- {
- []interface{}{"SET", "key", nil},
- "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$0\r\n\r\n",
- },
- {
- []interface{}{"ECHO", true, false},
- "*3\r\n$4\r\nECHO\r\n$1\r\n1\r\n$1\r\n0\r\n",
- },
-}
-
-func TestWrite(t *testing.T) {
- for _, tt := range writeTests {
- var buf bytes.Buffer
- c, _ := redis.Dial("", "", dialTestConn(nil, &buf))
- err := c.Send(tt.args[0].(string), tt.args[1:]...)
- if err != nil {
- t.Errorf("Send(%v) returned error %v", tt.args, err)
- continue
- }
- c.Flush()
- actual := buf.String()
- if actual != tt.expected {
- t.Errorf("Send(%v) = %q, want %q", tt.args, actual, tt.expected)
- }
- }
-}
-
-var errorSentinel = &struct{}{}
-
-var readTests = []struct {
- reply string
- expected interface{}
-}{
- {
- "+OK\r\n",
- "OK",
- },
- {
- "+PONG\r\n",
- "PONG",
- },
- {
- "@OK\r\n",
- errorSentinel,
- },
- {
- "$6\r\nfoobar\r\n",
- []byte("foobar"),
- },
- {
- "$-1\r\n",
- nil,
- },
- {
- ":1\r\n",
- int64(1),
- },
- {
- ":-2\r\n",
- int64(-2),
- },
- {
- "*0\r\n",
- []interface{}{},
- },
- {
- "*-1\r\n",
- nil,
- },
- {
- "*4\r\n$3\r\nfoo\r\n$3\r\nbar\r\n$5\r\nHello\r\n$5\r\nWorld\r\n",
- []interface{}{[]byte("foo"), []byte("bar"), []byte("Hello"), []byte("World")},
- },
- {
- "*3\r\n$3\r\nfoo\r\n$-1\r\n$3\r\nbar\r\n",
- []interface{}{[]byte("foo"), nil, []byte("bar")},
- },
-
- {
- // "x" is not a valid length
- "$x\r\nfoobar\r\n",
- errorSentinel,
- },
- {
- // -2 is not a valid length
- "$-2\r\n",
- errorSentinel,
- },
- {
- // "x" is not a valid integer
- ":x\r\n",
- errorSentinel,
- },
- {
- // missing \r\n following value
- "$6\r\nfoobar",
- errorSentinel,
- },
- {
- // short value
- "$6\r\nxx",
- errorSentinel,
- },
- {
- // long value
- "$6\r\nfoobarx\r\n",
- errorSentinel,
- },
-}
-
-func TestRead(t *testing.T) {
- for _, tt := range readTests {
- c, _ := redis.Dial("", "", dialTestConn(strings.NewReader(tt.reply), nil))
- actual, err := c.Receive()
- if tt.expected == errorSentinel {
- if err == nil {
- t.Errorf("Receive(%q) did not return expected error", tt.reply)
- }
- } else {
- if err != nil {
- t.Errorf("Receive(%q) returned error %v", tt.reply, err)
- continue
- }
- if !reflect.DeepEqual(actual, tt.expected) {
- t.Errorf("Receive(%q) = %v, want %v", tt.reply, actual, tt.expected)
- }
- }
- }
-}
-
-var testCommands = []struct {
- args []interface{}
- expected interface{}
-}{
- {
- []interface{}{"PING"},
- "PONG",
- },
- {
- []interface{}{"SET", "foo", "bar"},
- "OK",
- },
- {
- []interface{}{"GET", "foo"},
- []byte("bar"),
- },
- {
- []interface{}{"GET", "nokey"},
- nil,
- },
- {
- []interface{}{"MGET", "nokey", "foo"},
- []interface{}{nil, []byte("bar")},
- },
- {
- []interface{}{"INCR", "mycounter"},
- int64(1),
- },
- {
- []interface{}{"LPUSH", "mylist", "foo"},
- int64(1),
- },
- {
- []interface{}{"LPUSH", "mylist", "bar"},
- int64(2),
- },
- {
- []interface{}{"LRANGE", "mylist", 0, -1},
- []interface{}{[]byte("bar"), []byte("foo")},
- },
- {
- []interface{}{"MULTI"},
- "OK",
- },
- {
- []interface{}{"LRANGE", "mylist", 0, -1},
- "QUEUED",
- },
- {
- []interface{}{"PING"},
- "QUEUED",
- },
- {
- []interface{}{"EXEC"},
- []interface{}{
- []interface{}{[]byte("bar"), []byte("foo")},
- "PONG",
- },
- },
-}
-
-func TestDoCommands(t *testing.T) {
- c, err := redis.DialDefaultServer()
- if err != nil {
- t.Fatalf("error connection to database, %v", err)
- }
- defer c.Close()
-
- for _, cmd := range testCommands {
- actual, err := c.Do(cmd.args[0].(string), cmd.args[1:]...)
- if err != nil {
- t.Errorf("Do(%v) returned error %v", cmd.args, err)
- continue
- }
- if !reflect.DeepEqual(actual, cmd.expected) {
- t.Errorf("Do(%v) = %v, want %v", cmd.args, actual, cmd.expected)
- }
- }
-}
-
-func TestPipelineCommands(t *testing.T) {
- c, err := redis.DialDefaultServer()
- if err != nil {
- t.Fatalf("error connection to database, %v", err)
- }
- defer c.Close()
-
- for _, cmd := range testCommands {
- if err := c.Send(cmd.args[0].(string), cmd.args[1:]...); err != nil {
- t.Fatalf("Send(%v) returned error %v", cmd.args, err)
- }
- }
- if err := c.Flush(); err != nil {
- t.Errorf("Flush() returned error %v", err)
- }
- for _, cmd := range testCommands {
- actual, err := c.Receive()
- if err != nil {
- t.Fatalf("Receive(%v) returned error %v", cmd.args, err)
- }
- if !reflect.DeepEqual(actual, cmd.expected) {
- t.Errorf("Receive(%v) = %v, want %v", cmd.args, actual, cmd.expected)
- }
- }
-}
-
-func TestBlankCommmand(t *testing.T) {
- c, err := redis.DialDefaultServer()
- if err != nil {
- t.Fatalf("error connection to database, %v", err)
- }
- defer c.Close()
-
- for _, cmd := range testCommands {
- if err := c.Send(cmd.args[0].(string), cmd.args[1:]...); err != nil {
- t.Fatalf("Send(%v) returned error %v", cmd.args, err)
- }
- }
- reply, err := redis.Values(c.Do(""))
- if err != nil {
- t.Fatalf("Do() returned error %v", err)
- }
- if len(reply) != len(testCommands) {
- t.Fatalf("len(reply)=%d, want %d", len(reply), len(testCommands))
- }
- for i, cmd := range testCommands {
- actual := reply[i]
- if !reflect.DeepEqual(actual, cmd.expected) {
- t.Errorf("Receive(%v) = %v, want %v", cmd.args, actual, cmd.expected)
- }
- }
-}
-
-func TestRecvBeforeSend(t *testing.T) {
- c, err := redis.DialDefaultServer()
- if err != nil {
- t.Fatalf("error connection to database, %v", err)
- }
- defer c.Close()
- done := make(chan struct{})
- go func() {
- c.Receive()
- close(done)
- }()
- time.Sleep(time.Millisecond)
- c.Send("PING")
- c.Flush()
- <-done
- _, err = c.Do("")
- if err != nil {
- t.Fatalf("error=%v", err)
- }
-}
-
-func TestError(t *testing.T) {
- c, err := redis.DialDefaultServer()
- if err != nil {
- t.Fatalf("error connection to database, %v", err)
- }
- defer c.Close()
-
- c.Do("SET", "key", "val")
- _, err = c.Do("HSET", "key", "fld", "val")
- if err == nil {
- t.Errorf("Expected err for HSET on string key.")
- }
- if c.Err() != nil {
- t.Errorf("Conn has Err()=%v, expect nil", c.Err())
- }
- _, err = c.Do("SET", "key", "val")
- if err != nil {
- t.Errorf("Do(SET, key, val) returned error %v, expected nil.", err)
- }
-}
-
-func TestReadTimeout(t *testing.T) {
- l, err := net.Listen("tcp", "127.0.0.1:0")
- if err != nil {
- t.Fatalf("net.Listen returned %v", err)
- }
- defer l.Close()
-
- go func() {
- for {
- c, err := l.Accept()
- if err != nil {
- return
- }
- go func() {
- time.Sleep(time.Second)
- c.Write([]byte("+OK\r\n"))
- c.Close()
- }()
- }
- }()
-
- // Do
-
- c1, err := redis.Dial(l.Addr().Network(), l.Addr().String(), redis.DialReadTimeout(time.Millisecond))
- if err != nil {
- t.Fatalf("redis.Dial returned %v", err)
- }
- defer c1.Close()
-
- _, err = c1.Do("PING")
- if err == nil {
- t.Fatalf("c1.Do() returned nil, expect error")
- }
- if c1.Err() == nil {
- t.Fatalf("c1.Err() = nil, expect error")
- }
-
- // Send/Flush/Receive
-
- c2, err := redis.Dial(l.Addr().Network(), l.Addr().String(), redis.DialReadTimeout(time.Millisecond))
- if err != nil {
- t.Fatalf("redis.Dial returned %v", err)
- }
- defer c2.Close()
-
- c2.Send("PING")
- c2.Flush()
- _, err = c2.Receive()
- if err == nil {
- t.Fatalf("c2.Receive() returned nil, expect error")
- }
- if c2.Err() == nil {
- t.Fatalf("c2.Err() = nil, expect error")
- }
-}
-
-var dialErrors = []struct {
- rawurl string
- expectedError string
-}{
- {
- "localhost",
- "invalid redis URL scheme",
- },
- // The error message for invalid hosts is diffferent in different
- // versions of Go, so just check that there is an error message.
- {
- "redis://weird url",
- "",
- },
- {
- "redis://foo:bar:baz",
- "",
- },
- {
- "http://www.google.com",
- "invalid redis URL scheme: http",
- },
- {
- "redis://localhost:6379/abc123",
- "invalid database: abc123",
- },
-}
-
-func TestDialURLErrors(t *testing.T) {
- for _, d := range dialErrors {
- _, err := redis.DialURL(d.rawurl)
- if err == nil || !strings.Contains(err.Error(), d.expectedError) {
- t.Errorf("DialURL did not return expected error (expected %v to contain %s)", err, d.expectedError)
- }
- }
-}
-
-func TestDialURLPort(t *testing.T) {
- checkPort := func(network, address string) (net.Conn, error) {
- if address != "localhost:6379" {
- t.Errorf("DialURL did not set port to 6379 by default (got %v)", address)
- }
- return nil, nil
- }
- _, err := redis.DialURL("redis://localhost", redis.DialNetDial(checkPort))
- if err != nil {
- t.Error("dial error:", err)
- }
-}
-
-func TestDialURLHost(t *testing.T) {
- checkHost := func(network, address string) (net.Conn, error) {
- if address != "localhost:6379" {
- t.Errorf("DialURL did not set host to localhost by default (got %v)", address)
- }
- return nil, nil
- }
- _, err := redis.DialURL("redis://:6379", redis.DialNetDial(checkHost))
- if err != nil {
- t.Error("dial error:", err)
- }
-}
-
-func TestDialURLPassword(t *testing.T) {
- var buf bytes.Buffer
- _, err := redis.DialURL("redis://x:abc123@localhost", dialTestConn(strings.NewReader("+OK\r\n"), &buf))
- if err != nil {
- t.Error("dial error:", err)
- }
- expected := "*2\r\n$4\r\nAUTH\r\n$6\r\nabc123\r\n"
- actual := buf.String()
- if actual != expected {
- t.Errorf("commands = %q, want %q", actual, expected)
- }
-}
-
-func TestDialURLDatabase(t *testing.T) {
- var buf3 bytes.Buffer
- _, err3 := redis.DialURL("redis://localhost/3", dialTestConn(strings.NewReader("+OK\r\n"), &buf3))
- if err3 != nil {
- t.Error("dial error:", err3)
- }
- expected3 := "*2\r\n$6\r\nSELECT\r\n$1\r\n3\r\n"
- actual3 := buf3.String()
- if actual3 != expected3 {
- t.Errorf("commands = %q, want %q", actual3, expected3)
- }
- // empty DB means 0
- var buf0 bytes.Buffer
- _, err0 := redis.DialURL("redis://localhost/", dialTestConn(strings.NewReader("+OK\r\n"), &buf0))
- if err0 != nil {
- t.Error("dial error:", err0)
- }
- expected0 := ""
- actual0 := buf0.String()
- if actual0 != expected0 {
- t.Errorf("commands = %q, want %q", actual0, expected0)
- }
-}
-
-// Connect to local instance of Redis running on the default port.
-func ExampleDial() {
- c, err := redis.Dial("tcp", ":6379")
- if err != nil {
- // handle error
- }
- defer c.Close()
-}
-
-// Connect to remote instance of Redis using a URL.
-func ExampleDialURL() {
- c, err := redis.DialURL(os.Getenv("REDIS_URL"))
- if err != nil {
- // handle connection error
- }
- defer c.Close()
-}
-
-// TextExecError tests handling of errors in a transaction. See
-// http://redis.io/topics/transactions for information on how Redis handles
-// errors in a transaction.
-func TestExecError(t *testing.T) {
- c, err := redis.DialDefaultServer()
- if err != nil {
- t.Fatalf("error connection to database, %v", err)
- }
- defer c.Close()
-
- // Execute commands that fail before EXEC is called.
-
- c.Do("DEL", "k0")
- c.Do("ZADD", "k0", 0, 0)
- c.Send("MULTI")
- c.Send("NOTACOMMAND", "k0", 0, 0)
- c.Send("ZINCRBY", "k0", 0, 0)
- v, err := c.Do("EXEC")
- if err == nil {
- t.Fatalf("EXEC returned values %v, expected error", v)
- }
-
- // Execute commands that fail after EXEC is called. The first command
- // returns an error.
-
- c.Do("DEL", "k1")
- c.Do("ZADD", "k1", 0, 0)
- c.Send("MULTI")
- c.Send("HSET", "k1", 0, 0)
- c.Send("ZINCRBY", "k1", 0, 0)
- v, err = c.Do("EXEC")
- if err != nil {
- t.Fatalf("EXEC returned error %v", err)
- }
-
- vs, err := redis.Values(v, nil)
- if err != nil {
- t.Fatalf("Values(v) returned error %v", err)
- }
-
- if len(vs) != 2 {
- t.Fatalf("len(vs) == %d, want 2", len(vs))
- }
-
- if _, ok := vs[0].(error); !ok {
- t.Fatalf("first result is type %T, expected error", vs[0])
- }
-
- if _, ok := vs[1].([]byte); !ok {
- t.Fatalf("second result is type %T, expected []byte", vs[1])
- }
-
- // Execute commands that fail after EXEC is called. The second command
- // returns an error.
-
- c.Do("ZADD", "k2", 0, 0)
- c.Send("MULTI")
- c.Send("ZINCRBY", "k2", 0, 0)
- c.Send("HSET", "k2", 0, 0)
- v, err = c.Do("EXEC")
- if err != nil {
- t.Fatalf("EXEC returned error %v", err)
- }
-
- vs, err = redis.Values(v, nil)
- if err != nil {
- t.Fatalf("Values(v) returned error %v", err)
- }
-
- if len(vs) != 2 {
- t.Fatalf("len(vs) == %d, want 2", len(vs))
- }
-
- if _, ok := vs[0].([]byte); !ok {
- t.Fatalf("first result is type %T, expected []byte", vs[0])
- }
-
- if _, ok := vs[1].(error); !ok {
- t.Fatalf("second result is type %T, expected error", vs[2])
- }
-}
-
-func BenchmarkDoEmpty(b *testing.B) {
- b.StopTimer()
- c, err := redis.DialDefaultServer()
- if err != nil {
- b.Fatal(err)
- }
- defer c.Close()
- b.StartTimer()
- for i := 0; i < b.N; i++ {
- if _, err := c.Do(""); err != nil {
- b.Fatal(err)
- }
- }
-}
-
-func BenchmarkDoPing(b *testing.B) {
- b.StopTimer()
- c, err := redis.DialDefaultServer()
- if err != nil {
- b.Fatal(err)
- }
- defer c.Close()
- b.StartTimer()
- for i := 0; i < b.N; i++ {
- if _, err := c.Do("PING"); err != nil {
- b.Fatal(err)
- }
- }
-}
diff --git a/vendor/src/github.com/garyburd/redigo/redis/doc.go b/vendor/src/github.com/garyburd/redigo/redis/doc.go
deleted file mode 100644
index 198e67a..0000000
--- a/vendor/src/github.com/garyburd/redigo/redis/doc.go
+++ /dev/null
@@ -1,168 +0,0 @@
-// Copyright 2012 Gary Burd
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-// Package redis is a client for the Redis database.
-//
-// The Redigo FAQ (https://github.com/garyburd/redigo/wiki/FAQ) contains more
-// documentation about this package.
-//
-// Connections
-//
-// The Conn interface is the primary interface for working with Redis.
-// Applications create connections by calling the Dial, DialWithTimeout or
-// NewConn functions. In the future, functions will be added for creating
-// sharded and other types of connections.
-//
-// The application must call the connection Close method when the application
-// is done with the connection.
-//
-// Executing Commands
-//
-// The Conn interface has a generic method for executing Redis commands:
-//
-// Do(commandName string, args ...interface{}) (reply interface{}, err error)
-//
-// The Redis command reference (http://redis.io/commands) lists the available
-// commands. An example of using the Redis APPEND command is:
-//
-// n, err := conn.Do("APPEND", "key", "value")
-//
-// The Do method converts command arguments to binary strings for transmission
-// to the server as follows:
-//
-// Go Type Conversion
-// []byte Sent as is
-// string Sent as is
-// int, int64 strconv.FormatInt(v)
-// float64 strconv.FormatFloat(v, 'g', -1, 64)
-// bool true -> "1", false -> "0"
-// nil ""
-// all other types fmt.Print(v)
-//
-// Redis command reply types are represented using the following Go types:
-//
-// Redis type Go type
-// error redis.Error
-// integer int64
-// simple string string
-// bulk string []byte or nil if value not present.
-// array []interface{} or nil if value not present.
-//
-// Use type assertions or the reply helper functions to convert from
-// interface{} to the specific Go type for the command result.
-//
-// Pipelining
-//
-// Connections support pipelining using the Send, Flush and Receive methods.
-//
-// Send(commandName string, args ...interface{}) error
-// Flush() error
-// Receive() (reply interface{}, err error)
-//
-// Send writes the command to the connection's output buffer. Flush flushes the
-// connection's output buffer to the server. Receive reads a single reply from
-// the server. The following example shows a simple pipeline.
-//
-// c.Send("SET", "foo", "bar")
-// c.Send("GET", "foo")
-// c.Flush()
-// c.Receive() // reply from SET
-// v, err = c.Receive() // reply from GET
-//
-// The Do method combines the functionality of the Send, Flush and Receive
-// methods. The Do method starts by writing the command and flushing the output
-// buffer. Next, the Do method receives all pending replies including the reply
-// for the command just sent by Do. If any of the received replies is an error,
-// then Do returns the error. If there are no errors, then Do returns the last
-// reply. If the command argument to the Do method is "", then the Do method
-// will flush the output buffer and receive pending replies without sending a
-// command.
-//
-// Use the Send and Do methods to implement pipelined transactions.
-//
-// c.Send("MULTI")
-// c.Send("INCR", "foo")
-// c.Send("INCR", "bar")
-// r, err := c.Do("EXEC")
-// fmt.Println(r) // prints [1, 1]
-//
-// Concurrency
-//
-// Connections support one concurrent caller to the Receive method and one
-// concurrent caller to the Send and Flush methods. No other concurrency is
-// supported including concurrent calls to the Do method.
-//
-// For full concurrent access to Redis, use the thread-safe Pool to get, use
-// and release a connection from within a goroutine. Connections returned from
-// a Pool have the concurrency restrictions described in the previous
-// paragraph.
-//
-// Publish and Subscribe
-//
-// Use the Send, Flush and Receive methods to implement Pub/Sub subscribers.
-//
-// c.Send("SUBSCRIBE", "example")
-// c.Flush()
-// for {
-// reply, err := c.Receive()
-// if err != nil {
-// return err
-// }
-// // process pushed message
-// }
-//
-// The PubSubConn type wraps a Conn with convenience methods for implementing
-// subscribers. The Subscribe, PSubscribe, Unsubscribe and PUnsubscribe methods
-// send and flush a subscription management command. The receive method
-// converts a pushed message to convenient types for use in a type switch.
-//
-// psc := redis.PubSubConn{c}
-// psc.Subscribe("example")
-// for {
-// switch v := psc.Receive().(type) {
-// case redis.Message:
-// fmt.Printf("%s: message: %s\n", v.Channel, v.Data)
-// case redis.Subscription:
-// fmt.Printf("%s: %s %d\n", v.Channel, v.Kind, v.Count)
-// case error:
-// return v
-// }
-// }
-//
-// Reply Helpers
-//
-// The Bool, Int, Bytes, String, Strings and Values functions convert a reply
-// to a value of a specific type. To allow convenient wrapping of calls to the
-// connection Do and Receive methods, the functions take a second argument of
-// type error. If the error is non-nil, then the helper function returns the
-// error. If the error is nil, the function converts the reply to the specified
-// type:
-//
-// exists, err := redis.Bool(c.Do("EXISTS", "foo"))
-// if err != nil {
-// // handle error return from c.Do or type conversion error.
-// }
-//
-// The Scan function converts elements of a array reply to Go types:
-//
-// var value1 int
-// var value2 string
-// reply, err := redis.Values(c.Do("MGET", "key1", "key2"))
-// if err != nil {
-// // handle error
-// }
-// if _, err := redis.Scan(reply, &value1, &value2); err != nil {
-// // handle error
-// }
-package redis // import "github.com/garyburd/redigo/redis"
diff --git a/vendor/src/github.com/garyburd/redigo/redis/log.go b/vendor/src/github.com/garyburd/redigo/redis/log.go
deleted file mode 100644
index 129b86d..0000000
--- a/vendor/src/github.com/garyburd/redigo/redis/log.go
+++ /dev/null
@@ -1,117 +0,0 @@
-// Copyright 2012 Gary Burd
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package redis
-
-import (
- "bytes"
- "fmt"
- "log"
-)
-
-// NewLoggingConn returns a logging wrapper around a connection.
-func NewLoggingConn(conn Conn, logger *log.Logger, prefix string) Conn {
- if prefix != "" {
- prefix = prefix + "."
- }
- return &loggingConn{conn, logger, prefix}
-}
-
-type loggingConn struct {
- Conn
- logger *log.Logger
- prefix string
-}
-
-func (c *loggingConn) Close() error {
- err := c.Conn.Close()
- var buf bytes.Buffer
- fmt.Fprintf(&buf, "%sClose() -> (%v)", c.prefix, err)
- c.logger.Output(2, buf.String())
- return err
-}
-
-func (c *loggingConn) printValue(buf *bytes.Buffer, v interface{}) {
- const chop = 32
- switch v := v.(type) {
- case []byte:
- if len(v) > chop {
- fmt.Fprintf(buf, "%q...", v[:chop])
- } else {
- fmt.Fprintf(buf, "%q", v)
- }
- case string:
- if len(v) > chop {
- fmt.Fprintf(buf, "%q...", v[:chop])
- } else {
- fmt.Fprintf(buf, "%q", v)
- }
- case []interface{}:
- if len(v) == 0 {
- buf.WriteString("[]")
- } else {
- sep := "["
- fin := "]"
- if len(v) > chop {
- v = v[:chop]
- fin = "...]"
- }
- for _, vv := range v {
- buf.WriteString(sep)
- c.printValue(buf, vv)
- sep = ", "
- }
- buf.WriteString(fin)
- }
- default:
- fmt.Fprint(buf, v)
- }
-}
-
-func (c *loggingConn) print(method, commandName string, args []interface{}, reply interface{}, err error) {
- var buf bytes.Buffer
- fmt.Fprintf(&buf, "%s%s(", c.prefix, method)
- if method != "Receive" {
- buf.WriteString(commandName)
- for _, arg := range args {
- buf.WriteString(", ")
- c.printValue(&buf, arg)
- }
- }
- buf.WriteString(") -> (")
- if method != "Send" {
- c.printValue(&buf, reply)
- buf.WriteString(", ")
- }
- fmt.Fprintf(&buf, "%v)", err)
- c.logger.Output(3, buf.String())
-}
-
-func (c *loggingConn) Do(commandName string, args ...interface{}) (interface{}, error) {
- reply, err := c.Conn.Do(commandName, args...)
- c.print("Do", commandName, args, reply, err)
- return reply, err
-}
-
-func (c *loggingConn) Send(commandName string, args ...interface{}) error {
- err := c.Conn.Send(commandName, args...)
- c.print("Send", commandName, args, nil, err)
- return err
-}
-
-func (c *loggingConn) Receive() (interface{}, error) {
- reply, err := c.Conn.Receive()
- c.print("Receive", "", nil, reply, err)
- return reply, err
-}
diff --git a/vendor/src/github.com/garyburd/redigo/redis/pool.go b/vendor/src/github.com/garyburd/redigo/redis/pool.go
deleted file mode 100644
index 3d23360..0000000
--- a/vendor/src/github.com/garyburd/redigo/redis/pool.go
+++ /dev/null
@@ -1,397 +0,0 @@
-// Copyright 2012 Gary Burd
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package redis
-
-import (
- "bytes"
- "container/list"
- "crypto/rand"
- "crypto/sha1"
- "errors"
- "io"
- "strconv"
- "sync"
- "time"
-
- "github.com/garyburd/redigo/internal"
-)
-
-var nowFunc = time.Now // for testing
-
-// ErrPoolExhausted is returned from a pool connection method (Do, Send,
-// Receive, Flush, Err) when the maximum number of database connections in the
-// pool has been reached.
-var ErrPoolExhausted = errors.New("redigo: connection pool exhausted")
-
-var (
- errPoolClosed = errors.New("redigo: connection pool closed")
- errConnClosed = errors.New("redigo: connection closed")
-)
-
-// Pool maintains a pool of connections. The application calls the Get method
-// to get a connection from the pool and the connection's Close method to
-// return the connection's resources to the pool.
-//
-// The following example shows how to use a pool in a web application. The
-// application creates a pool at application startup and makes it available to
-// request handlers using a global variable. The pool configuration used here
-// is an example, not a recommendation.
-//
-// func newPool(server, password string) *redis.Pool {
-// return &redis.Pool{
-// MaxIdle: 3,
-// IdleTimeout: 240 * time.Second,
-// Dial: func () (redis.Conn, error) {
-// c, err := redis.Dial("tcp", server)
-// if err != nil {
-// return nil, err
-// }
-// if _, err := c.Do("AUTH", password); err != nil {
-// c.Close()
-// return nil, err
-// }
-// return c, err
-// },
-// TestOnBorrow: func(c redis.Conn, t time.Time) error {
-// if time.Since(t) < time.Minute {
-// return nil
-// }
-// _, err := c.Do("PING")
-// return err
-// },
-// }
-// }
-//
-// var (
-// pool *redis.Pool
-// redisServer = flag.String("redisServer", ":6379", "")
-// redisPassword = flag.String("redisPassword", "", "")
-// )
-//
-// func main() {
-// flag.Parse()
-// pool = newPool(*redisServer, *redisPassword)
-// ...
-// }
-//
-// A request handler gets a connection from the pool and closes the connection
-// when the handler is done:
-//
-// func serveHome(w http.ResponseWriter, r *http.Request) {
-// conn := pool.Get()
-// defer conn.Close()
-// ....
-// }
-//
-type Pool struct {
-
- // Dial is an application supplied function for creating and configuring a
- // connection.
- //
- // The connection returned from Dial must not be in a special state
- // (subscribed to pubsub channel, transaction started, ...).
- Dial func() (Conn, error)
-
- // TestOnBorrow is an optional application supplied function for checking
- // the health of an idle connection before the connection is used again by
- // the application. Argument t is the time that the connection was returned
- // to the pool. If the function returns an error, then the connection is
- // closed.
- TestOnBorrow func(c Conn, t time.Time) error
-
- // Maximum number of idle connections in the pool.
- MaxIdle int
-
- // Maximum number of connections allocated by the pool at a given time.
- // When zero, there is no limit on the number of connections in the pool.
- MaxActive int
-
- // Close connections after remaining idle for this duration. If the value
- // is zero, then idle connections are not closed. Applications should set
- // the timeout to a value less than the server's timeout.
- IdleTimeout time.Duration
-
- // If Wait is true and the pool is at the MaxActive limit, then Get() waits
- // for a connection to be returned to the pool before returning.
- Wait bool
-
- // mu protects fields defined below.
- mu sync.Mutex
- cond *sync.Cond
- closed bool
- active int
-
- // Stack of idleConn with most recently used at the front.
- idle list.List
-}
-
-type idleConn struct {
- c Conn
- t time.Time
-}
-
-// NewPool creates a new pool.
-//
-// Deprecated: Initialize the Pool directory as shown in the example.
-func NewPool(newFn func() (Conn, error), maxIdle int) *Pool {
- return &Pool{Dial: newFn, MaxIdle: maxIdle}
-}
-
-// Get gets a connection. The application must close the returned connection.
-// This method always returns a valid connection so that applications can defer
-// error handling to the first use of the connection. If there is an error
-// getting an underlying connection, then the connection Err, Do, Send, Flush
-// and Receive methods return that error.
-func (p *Pool) Get() Conn {
- c, err := p.get()
- if err != nil {
- return errorConnection{err}
- }
- return &pooledConnection{p: p, c: c}
-}
-
-// ActiveCount returns the number of active connections in the pool.
-func (p *Pool) ActiveCount() int {
- p.mu.Lock()
- active := p.active
- p.mu.Unlock()
- return active
-}
-
-// Close releases the resources used by the pool.
-func (p *Pool) Close() error {
- p.mu.Lock()
- idle := p.idle
- p.idle.Init()
- p.closed = true
- p.active -= idle.Len()
- if p.cond != nil {
- p.cond.Broadcast()
- }
- p.mu.Unlock()
- for e := idle.Front(); e != nil; e = e.Next() {
- e.Value.(idleConn).c.Close()
- }
- return nil
-}
-
-// release decrements the active count and signals waiters. The caller must
-// hold p.mu during the call.
-func (p *Pool) release() {
- p.active -= 1
- if p.cond != nil {
- p.cond.Signal()
- }
-}
-
-// get prunes stale connections and returns a connection from the idle list or
-// creates a new connection.
-func (p *Pool) get() (Conn, error) {
- p.mu.Lock()
-
- // Prune stale connections.
-
- if timeout := p.IdleTimeout; timeout > 0 {
- for i, n := 0, p.idle.Len(); i < n; i++ {
- e := p.idle.Back()
- if e == nil {
- break
- }
- ic := e.Value.(idleConn)
- if ic.t.Add(timeout).After(nowFunc()) {
- break
- }
- p.idle.Remove(e)
- p.release()
- p.mu.Unlock()
- ic.c.Close()
- p.mu.Lock()
- }
- }
-
- for {
-
- // Get idle connection.
-
- for i, n := 0, p.idle.Len(); i < n; i++ {
- e := p.idle.Front()
- if e == nil {
- break
- }
- ic := e.Value.(idleConn)
- p.idle.Remove(e)
- test := p.TestOnBorrow
- p.mu.Unlock()
- if test == nil || test(ic.c, ic.t) == nil {
- return ic.c, nil
- }
- ic.c.Close()
- p.mu.Lock()
- p.release()
- }
-
- // Check for pool closed before dialing a new connection.
-
- if p.closed {
- p.mu.Unlock()
- return nil, errors.New("redigo: get on closed pool")
- }
-
- // Dial new connection if under limit.
-
- if p.MaxActive == 0 || p.active < p.MaxActive {
- dial := p.Dial
- p.active += 1
- p.mu.Unlock()
- c, err := dial()
- if err != nil {
- p.mu.Lock()
- p.release()
- p.mu.Unlock()
- c = nil
- }
- return c, err
- }
-
- if !p.Wait {
- p.mu.Unlock()
- return nil, ErrPoolExhausted
- }
-
- if p.cond == nil {
- p.cond = sync.NewCond(&p.mu)
- }
- p.cond.Wait()
- }
-}
-
-func (p *Pool) put(c Conn, forceClose bool) error {
- err := c.Err()
- p.mu.Lock()
- if !p.closed && err == nil && !forceClose {
- p.idle.PushFront(idleConn{t: nowFunc(), c: c})
- if p.idle.Len() > p.MaxIdle {
- c = p.idle.Remove(p.idle.Back()).(idleConn).c
- } else {
- c = nil
- }
- }
-
- if c == nil {
- if p.cond != nil {
- p.cond.Signal()
- }
- p.mu.Unlock()
- return nil
- }
-
- p.release()
- p.mu.Unlock()
- return c.Close()
-}
-
-type pooledConnection struct {
- p *Pool
- c Conn
- state int
-}
-
-var (
- sentinel []byte
- sentinelOnce sync.Once
-)
-
-func initSentinel() {
- p := make([]byte, 64)
- if _, err := rand.Read(p); err == nil {
- sentinel = p
- } else {
- h := sha1.New()
- io.WriteString(h, "Oops, rand failed. Use time instead.")
- io.WriteString(h, strconv.FormatInt(time.Now().UnixNano(), 10))
- sentinel = h.Sum(nil)
- }
-}
-
-func (pc *pooledConnection) Close() error {
- c := pc.c
- if _, ok := c.(errorConnection); ok {
- return nil
- }
- pc.c = errorConnection{errConnClosed}
-
- if pc.state&internal.MultiState != 0 {
- c.Send("DISCARD")
- pc.state &^= (internal.MultiState | internal.WatchState)
- } else if pc.state&internal.WatchState != 0 {
- c.Send("UNWATCH")
- pc.state &^= internal.WatchState
- }
- if pc.state&internal.SubscribeState != 0 {
- c.Send("UNSUBSCRIBE")
- c.Send("PUNSUBSCRIBE")
- // To detect the end of the message stream, ask the server to echo
- // a sentinel value and read until we see that value.
- sentinelOnce.Do(initSentinel)
- c.Send("ECHO", sentinel)
- c.Flush()
- for {
- p, err := c.Receive()
- if err != nil {
- break
- }
- if p, ok := p.([]byte); ok && bytes.Equal(p, sentinel) {
- pc.state &^= internal.SubscribeState
- break
- }
- }
- }
- c.Do("")
- pc.p.put(c, pc.state != 0)
- return nil
-}
-
-func (pc *pooledConnection) Err() error {
- return pc.c.Err()
-}
-
-func (pc *pooledConnection) Do(commandName string, args ...interface{}) (reply interface{}, err error) {
- ci := internal.LookupCommandInfo(commandName)
- pc.state = (pc.state | ci.Set) &^ ci.Clear
- return pc.c.Do(commandName, args...)
-}
-
-func (pc *pooledConnection) Send(commandName string, args ...interface{}) error {
- ci := internal.LookupCommandInfo(commandName)
- pc.state = (pc.state | ci.Set) &^ ci.Clear
- return pc.c.Send(commandName, args...)
-}
-
-func (pc *pooledConnection) Flush() error {
- return pc.c.Flush()
-}
-
-func (pc *pooledConnection) Receive() (reply interface{}, err error) {
- return pc.c.Receive()
-}
-
-type errorConnection struct{ err error }
-
-func (ec errorConnection) Do(string, ...interface{}) (interface{}, error) { return nil, ec.err }
-func (ec errorConnection) Send(string, ...interface{}) error { return ec.err }
-func (ec errorConnection) Err() error { return ec.err }
-func (ec errorConnection) Close() error { return ec.err }
-func (ec errorConnection) Flush() error { return ec.err }
-func (ec errorConnection) Receive() (interface{}, error) { return nil, ec.err }
diff --git a/vendor/src/github.com/garyburd/redigo/redis/pool_test.go b/vendor/src/github.com/garyburd/redigo/redis/pool_test.go
deleted file mode 100644
index 26d2747..0000000
--- a/vendor/src/github.com/garyburd/redigo/redis/pool_test.go
+++ /dev/null
@@ -1,684 +0,0 @@
-// Copyright 2011 Gary Burd
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package redis_test
-
-import (
- "errors"
- "io"
- "reflect"
- "sync"
- "testing"
- "time"
-
- "github.com/garyburd/redigo/redis"
-)
-
-type poolTestConn struct {
- d *poolDialer
- err error
- redis.Conn
-}
-
-func (c *poolTestConn) Close() error {
- c.d.mu.Lock()
- c.d.open -= 1
- c.d.mu.Unlock()
- return c.Conn.Close()
-}
-
-func (c *poolTestConn) Err() error { return c.err }
-
-func (c *poolTestConn) Do(commandName string, args ...interface{}) (interface{}, error) {
- if commandName == "ERR" {
- c.err = args[0].(error)
- commandName = "PING"
- }
- if commandName != "" {
- c.d.commands = append(c.d.commands, commandName)
- }
- return c.Conn.Do(commandName, args...)
-}
-
-func (c *poolTestConn) Send(commandName string, args ...interface{}) error {
- c.d.commands = append(c.d.commands, commandName)
- return c.Conn.Send(commandName, args...)
-}
-
-type poolDialer struct {
- mu sync.Mutex
- t *testing.T
- dialed int
- open int
- commands []string
- dialErr error
-}
-
-func (d *poolDialer) dial() (redis.Conn, error) {
- d.mu.Lock()
- d.dialed += 1
- dialErr := d.dialErr
- d.mu.Unlock()
- if dialErr != nil {
- return nil, d.dialErr
- }
- c, err := redis.DialDefaultServer()
- if err != nil {
- return nil, err
- }
- d.mu.Lock()
- d.open += 1
- d.mu.Unlock()
- return &poolTestConn{d: d, Conn: c}, nil
-}
-
-func (d *poolDialer) check(message string, p *redis.Pool, dialed, open int) {
- d.mu.Lock()
- if d.dialed != dialed {
- d.t.Errorf("%s: dialed=%d, want %d", message, d.dialed, dialed)
- }
- if d.open != open {
- d.t.Errorf("%s: open=%d, want %d", message, d.open, open)
- }
- if active := p.ActiveCount(); active != open {
- d.t.Errorf("%s: active=%d, want %d", message, active, open)
- }
- d.mu.Unlock()
-}
-
-func TestPoolReuse(t *testing.T) {
- d := poolDialer{t: t}
- p := &redis.Pool{
- MaxIdle: 2,
- Dial: d.dial,
- }
-
- for i := 0; i < 10; i++ {
- c1 := p.Get()
- c1.Do("PING")
- c2 := p.Get()
- c2.Do("PING")
- c1.Close()
- c2.Close()
- }
-
- d.check("before close", p, 2, 2)
- p.Close()
- d.check("after close", p, 2, 0)
-}
-
-func TestPoolMaxIdle(t *testing.T) {
- d := poolDialer{t: t}
- p := &redis.Pool{
- MaxIdle: 2,
- Dial: d.dial,
- }
- defer p.Close()
-
- for i := 0; i < 10; i++ {
- c1 := p.Get()
- c1.Do("PING")
- c2 := p.Get()
- c2.Do("PING")
- c3 := p.Get()
- c3.Do("PING")
- c1.Close()
- c2.Close()
- c3.Close()
- }
- d.check("before close", p, 12, 2)
- p.Close()
- d.check("after close", p, 12, 0)
-}
-
-func TestPoolError(t *testing.T) {
- d := poolDialer{t: t}
- p := &redis.Pool{
- MaxIdle: 2,
- Dial: d.dial,
- }
- defer p.Close()
-
- c := p.Get()
- c.Do("ERR", io.EOF)
- if c.Err() == nil {
- t.Errorf("expected c.Err() != nil")
- }
- c.Close()
-
- c = p.Get()
- c.Do("ERR", io.EOF)
- c.Close()
-
- d.check(".", p, 2, 0)
-}
-
-func TestPoolClose(t *testing.T) {
- d := poolDialer{t: t}
- p := &redis.Pool{
- MaxIdle: 2,
- Dial: d.dial,
- }
- defer p.Close()
-
- c1 := p.Get()
- c1.Do("PING")
- c2 := p.Get()
- c2.Do("PING")
- c3 := p.Get()
- c3.Do("PING")
-
- c1.Close()
- if _, err := c1.Do("PING"); err == nil {
- t.Errorf("expected error after connection closed")
- }
-
- c2.Close()
- c2.Close()
-
- p.Close()
-
- d.check("after pool close", p, 3, 1)
-
- if _, err := c1.Do("PING"); err == nil {
- t.Errorf("expected error after connection and pool closed")
- }
-
- c3.Close()
-
- d.check("after conn close", p, 3, 0)
-
- c1 = p.Get()
- if _, err := c1.Do("PING"); err == nil {
- t.Errorf("expected error after pool closed")
- }
-}
-
-func TestPoolTimeout(t *testing.T) {
- d := poolDialer{t: t}
- p := &redis.Pool{
- MaxIdle: 2,
- IdleTimeout: 300 * time.Second,
- Dial: d.dial,
- }
- defer p.Close()
-
- now := time.Now()
- redis.SetNowFunc(func() time.Time { return now })
- defer redis.SetNowFunc(time.Now)
-
- c := p.Get()
- c.Do("PING")
- c.Close()
-
- d.check("1", p, 1, 1)
-
- now = now.Add(p.IdleTimeout)
-
- c = p.Get()
- c.Do("PING")
- c.Close()
-
- d.check("2", p, 2, 1)
-}
-
-func TestPoolConcurrenSendReceive(t *testing.T) {
- p := &redis.Pool{
- Dial: redis.DialDefaultServer,
- }
- defer p.Close()
-
- c := p.Get()
- done := make(chan error, 1)
- go func() {
- _, err := c.Receive()
- done <- err
- }()
- c.Send("PING")
- c.Flush()
- err := <-done
- if err != nil {
- t.Fatalf("Receive() returned error %v", err)
- }
- _, err = c.Do("")
- if err != nil {
- t.Fatalf("Do() returned error %v", err)
- }
- c.Close()
-}
-
-func TestPoolBorrowCheck(t *testing.T) {
- d := poolDialer{t: t}
- p := &redis.Pool{
- MaxIdle: 2,
- Dial: d.dial,
- TestOnBorrow: func(redis.Conn, time.Time) error { return redis.Error("BLAH") },
- }
- defer p.Close()
-
- for i := 0; i < 10; i++ {
- c := p.Get()
- c.Do("PING")
- c.Close()
- }
- d.check("1", p, 10, 1)
-}
-
-func TestPoolMaxActive(t *testing.T) {
- d := poolDialer{t: t}
- p := &redis.Pool{
- MaxIdle: 2,
- MaxActive: 2,
- Dial: d.dial,
- }
- defer p.Close()
-
- c1 := p.Get()
- c1.Do("PING")
- c2 := p.Get()
- c2.Do("PING")
-
- d.check("1", p, 2, 2)
-
- c3 := p.Get()
- if _, err := c3.Do("PING"); err != redis.ErrPoolExhausted {
- t.Errorf("expected pool exhausted")
- }
-
- c3.Close()
- d.check("2", p, 2, 2)
- c2.Close()
- d.check("3", p, 2, 2)
-
- c3 = p.Get()
- if _, err := c3.Do("PING"); err != nil {
- t.Errorf("expected good channel, err=%v", err)
- }
- c3.Close()
-
- d.check("4", p, 2, 2)
-}
-
-func TestPoolMonitorCleanup(t *testing.T) {
- d := poolDialer{t: t}
- p := &redis.Pool{
- MaxIdle: 2,
- MaxActive: 2,
- Dial: d.dial,
- }
- defer p.Close()
-
- c := p.Get()
- c.Send("MONITOR")
- c.Close()
-
- d.check("", p, 1, 0)
-}
-
-func TestPoolPubSubCleanup(t *testing.T) {
- d := poolDialer{t: t}
- p := &redis.Pool{
- MaxIdle: 2,
- MaxActive: 2,
- Dial: d.dial,
- }
- defer p.Close()
-
- c := p.Get()
- c.Send("SUBSCRIBE", "x")
- c.Close()
-
- want := []string{"SUBSCRIBE", "UNSUBSCRIBE", "PUNSUBSCRIBE", "ECHO"}
- if !reflect.DeepEqual(d.commands, want) {
- t.Errorf("got commands %v, want %v", d.commands, want)
- }
- d.commands = nil
-
- c = p.Get()
- c.Send("PSUBSCRIBE", "x*")
- c.Close()
-
- want = []string{"PSUBSCRIBE", "UNSUBSCRIBE", "PUNSUBSCRIBE", "ECHO"}
- if !reflect.DeepEqual(d.commands, want) {
- t.Errorf("got commands %v, want %v", d.commands, want)
- }
- d.commands = nil
-}
-
-func TestPoolTransactionCleanup(t *testing.T) {
- d := poolDialer{t: t}
- p := &redis.Pool{
- MaxIdle: 2,
- MaxActive: 2,
- Dial: d.dial,
- }
- defer p.Close()
-
- c := p.Get()
- c.Do("WATCH", "key")
- c.Do("PING")
- c.Close()
-
- want := []string{"WATCH", "PING", "UNWATCH"}
- if !reflect.DeepEqual(d.commands, want) {
- t.Errorf("got commands %v, want %v", d.commands, want)
- }
- d.commands = nil
-
- c = p.Get()
- c.Do("WATCH", "key")
- c.Do("UNWATCH")
- c.Do("PING")
- c.Close()
-
- want = []string{"WATCH", "UNWATCH", "PING"}
- if !reflect.DeepEqual(d.commands, want) {
- t.Errorf("got commands %v, want %v", d.commands, want)
- }
- d.commands = nil
-
- c = p.Get()
- c.Do("WATCH", "key")
- c.Do("MULTI")
- c.Do("PING")
- c.Close()
-
- want = []string{"WATCH", "MULTI", "PING", "DISCARD"}
- if !reflect.DeepEqual(d.commands, want) {
- t.Errorf("got commands %v, want %v", d.commands, want)
- }
- d.commands = nil
-
- c = p.Get()
- c.Do("WATCH", "key")
- c.Do("MULTI")
- c.Do("DISCARD")
- c.Do("PING")
- c.Close()
-
- want = []string{"WATCH", "MULTI", "DISCARD", "PING"}
- if !reflect.DeepEqual(d.commands, want) {
- t.Errorf("got commands %v, want %v", d.commands, want)
- }
- d.commands = nil
-
- c = p.Get()
- c.Do("WATCH", "key")
- c.Do("MULTI")
- c.Do("EXEC")
- c.Do("PING")
- c.Close()
-
- want = []string{"WATCH", "MULTI", "EXEC", "PING"}
- if !reflect.DeepEqual(d.commands, want) {
- t.Errorf("got commands %v, want %v", d.commands, want)
- }
- d.commands = nil
-}
-
-func startGoroutines(p *redis.Pool, cmd string, args ...interface{}) chan error {
- errs := make(chan error, 10)
- for i := 0; i < cap(errs); i++ {
- go func() {
- c := p.Get()
- _, err := c.Do(cmd, args...)
- errs <- err
- c.Close()
- }()
- }
-
- // Wait for goroutines to block.
- time.Sleep(time.Second / 4)
-
- return errs
-}
-
-func TestWaitPool(t *testing.T) {
- d := poolDialer{t: t}
- p := &redis.Pool{
- MaxIdle: 1,
- MaxActive: 1,
- Dial: d.dial,
- Wait: true,
- }
- defer p.Close()
-
- c := p.Get()
- errs := startGoroutines(p, "PING")
- d.check("before close", p, 1, 1)
- c.Close()
- timeout := time.After(2 * time.Second)
- for i := 0; i < cap(errs); i++ {
- select {
- case err := <-errs:
- if err != nil {
- t.Fatal(err)
- }
- case <-timeout:
- t.Fatalf("timeout waiting for blocked goroutine %d", i)
- }
- }
- d.check("done", p, 1, 1)
-}
-
-func TestWaitPoolClose(t *testing.T) {
- d := poolDialer{t: t}
- p := &redis.Pool{
- MaxIdle: 1,
- MaxActive: 1,
- Dial: d.dial,
- Wait: true,
- }
- defer p.Close()
-
- c := p.Get()
- if _, err := c.Do("PING"); err != nil {
- t.Fatal(err)
- }
- errs := startGoroutines(p, "PING")
- d.check("before close", p, 1, 1)
- p.Close()
- timeout := time.After(2 * time.Second)
- for i := 0; i < cap(errs); i++ {
- select {
- case err := <-errs:
- switch err {
- case nil:
- t.Fatal("blocked goroutine did not get error")
- case redis.ErrPoolExhausted:
- t.Fatal("blocked goroutine got pool exhausted error")
- }
- case <-timeout:
- t.Fatal("timeout waiting for blocked goroutine")
- }
- }
- c.Close()
- d.check("done", p, 1, 0)
-}
-
-func TestWaitPoolCommandError(t *testing.T) {
- testErr := errors.New("test")
- d := poolDialer{t: t}
- p := &redis.Pool{
- MaxIdle: 1,
- MaxActive: 1,
- Dial: d.dial,
- Wait: true,
- }
- defer p.Close()
-
- c := p.Get()
- errs := startGoroutines(p, "ERR", testErr)
- d.check("before close", p, 1, 1)
- c.Close()
- timeout := time.After(2 * time.Second)
- for i := 0; i < cap(errs); i++ {
- select {
- case err := <-errs:
- if err != nil {
- t.Fatal(err)
- }
- case <-timeout:
- t.Fatalf("timeout waiting for blocked goroutine %d", i)
- }
- }
- d.check("done", p, cap(errs), 0)
-}
-
-func TestWaitPoolDialError(t *testing.T) {
- testErr := errors.New("test")
- d := poolDialer{t: t}
- p := &redis.Pool{
- MaxIdle: 1,
- MaxActive: 1,
- Dial: d.dial,
- Wait: true,
- }
- defer p.Close()
-
- c := p.Get()
- errs := startGoroutines(p, "ERR", testErr)
- d.check("before close", p, 1, 1)
-
- d.dialErr = errors.New("dial")
- c.Close()
-
- nilCount := 0
- errCount := 0
- timeout := time.After(2 * time.Second)
- for i := 0; i < cap(errs); i++ {
- select {
- case err := <-errs:
- switch err {
- case nil:
- nilCount++
- case d.dialErr:
- errCount++
- default:
- t.Fatalf("expected dial error or nil, got %v", err)
- }
- case <-timeout:
- t.Fatalf("timeout waiting for blocked goroutine %d", i)
- }
- }
- if nilCount != 1 {
- t.Errorf("expected one nil error, got %d", nilCount)
- }
- if errCount != cap(errs)-1 {
- t.Errorf("expected %d dial errors, got %d", cap(errs)-1, errCount)
- }
- d.check("done", p, cap(errs), 0)
-}
-
-// Borrowing requires us to iterate over the idle connections, unlock the pool,
-// and perform a blocking operation to check the connection still works. If
-// TestOnBorrow fails, we must reacquire the lock and continue iteration. This
-// test ensures that iteration will work correctly if multiple threads are
-// iterating simultaneously.
-func TestLocking_TestOnBorrowFails_PoolDoesntCrash(t *testing.T) {
- const count = 100
-
- // First we'll Create a pool where the pilfering of idle connections fails.
- d := poolDialer{t: t}
- p := &redis.Pool{
- MaxIdle: count,
- MaxActive: count,
- Dial: d.dial,
- TestOnBorrow: func(c redis.Conn, t time.Time) error {
- return errors.New("No way back into the real world.")
- },
- }
- defer p.Close()
-
- // Fill the pool with idle connections.
- conns := make([]redis.Conn, count)
- for i := range conns {
- conns[i] = p.Get()
- }
- for i := range conns {
- conns[i].Close()
- }
-
- // Spawn a bunch of goroutines to thrash the pool.
- var wg sync.WaitGroup
- wg.Add(count)
- for i := 0; i < count; i++ {
- go func() {
- c := p.Get()
- if c.Err() != nil {
- t.Errorf("pool get failed: %v", c.Err())
- }
- c.Close()
- wg.Done()
- }()
- }
- wg.Wait()
- if d.dialed != count*2 {
- t.Errorf("Expected %d dials, got %d", count*2, d.dialed)
- }
-}
-
-func BenchmarkPoolGet(b *testing.B) {
- b.StopTimer()
- p := redis.Pool{Dial: redis.DialDefaultServer, MaxIdle: 2}
- c := p.Get()
- if err := c.Err(); err != nil {
- b.Fatal(err)
- }
- c.Close()
- defer p.Close()
- b.StartTimer()
- for i := 0; i < b.N; i++ {
- c = p.Get()
- c.Close()
- }
-}
-
-func BenchmarkPoolGetErr(b *testing.B) {
- b.StopTimer()
- p := redis.Pool{Dial: redis.DialDefaultServer, MaxIdle: 2}
- c := p.Get()
- if err := c.Err(); err != nil {
- b.Fatal(err)
- }
- c.Close()
- defer p.Close()
- b.StartTimer()
- for i := 0; i < b.N; i++ {
- c = p.Get()
- if err := c.Err(); err != nil {
- b.Fatal(err)
- }
- c.Close()
- }
-}
-
-func BenchmarkPoolGetPing(b *testing.B) {
- b.StopTimer()
- p := redis.Pool{Dial: redis.DialDefaultServer, MaxIdle: 2}
- c := p.Get()
- if err := c.Err(); err != nil {
- b.Fatal(err)
- }
- c.Close()
- defer p.Close()
- b.StartTimer()
- for i := 0; i < b.N; i++ {
- c = p.Get()
- if _, err := c.Do("PING"); err != nil {
- b.Fatal(err)
- }
- c.Close()
- }
-}
diff --git a/vendor/src/github.com/garyburd/redigo/redis/pubsub.go b/vendor/src/github.com/garyburd/redigo/redis/pubsub.go
deleted file mode 100644
index c0ecce8..0000000
--- a/vendor/src/github.com/garyburd/redigo/redis/pubsub.go
+++ /dev/null
@@ -1,144 +0,0 @@
-// Copyright 2012 Gary Burd
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package redis
-
-import "errors"
-
-// Subscription represents a subscribe or unsubscribe notification.
-type Subscription struct {
-
- // Kind is "subscribe", "unsubscribe", "psubscribe" or "punsubscribe"
- Kind string
-
- // The channel that was changed.
- Channel string
-
- // The current number of subscriptions for connection.
- Count int
-}
-
-// Message represents a message notification.
-type Message struct {
-
- // The originating channel.
- Channel string
-
- // The message data.
- Data []byte
-}
-
-// PMessage represents a pmessage notification.
-type PMessage struct {
-
- // The matched pattern.
- Pattern string
-
- // The originating channel.
- Channel string
-
- // The message data.
- Data []byte
-}
-
-// Pong represents a pubsub pong notification.
-type Pong struct {
- Data string
-}
-
-// PubSubConn wraps a Conn with convenience methods for subscribers.
-type PubSubConn struct {
- Conn Conn
-}
-
-// Close closes the connection.
-func (c PubSubConn) Close() error {
- return c.Conn.Close()
-}
-
-// Subscribe subscribes the connection to the specified channels.
-func (c PubSubConn) Subscribe(channel ...interface{}) error {
- c.Conn.Send("SUBSCRIBE", channel...)
- return c.Conn.Flush()
-}
-
-// PSubscribe subscribes the connection to the given patterns.
-func (c PubSubConn) PSubscribe(channel ...interface{}) error {
- c.Conn.Send("PSUBSCRIBE", channel...)
- return c.Conn.Flush()
-}
-
-// Unsubscribe unsubscribes the connection from the given channels, or from all
-// of them if none is given.
-func (c PubSubConn) Unsubscribe(channel ...interface{}) error {
- c.Conn.Send("UNSUBSCRIBE", channel...)
- return c.Conn.Flush()
-}
-
-// PUnsubscribe unsubscribes the connection from the given patterns, or from all
-// of them if none is given.
-func (c PubSubConn) PUnsubscribe(channel ...interface{}) error {
- c.Conn.Send("PUNSUBSCRIBE", channel...)
- return c.Conn.Flush()
-}
-
-// Ping sends a PING to the server with the specified data.
-func (c PubSubConn) Ping(data string) error {
- c.Conn.Send("PING", data)
- return c.Conn.Flush()
-}
-
-// Receive returns a pushed message as a Subscription, Message, PMessage, Pong
-// or error. The return value is intended to be used directly in a type switch
-// as illustrated in the PubSubConn example.
-func (c PubSubConn) Receive() interface{} {
- reply, err := Values(c.Conn.Receive())
- if err != nil {
- return err
- }
-
- var kind string
- reply, err = Scan(reply, &kind)
- if err != nil {
- return err
- }
-
- switch kind {
- case "message":
- var m Message
- if _, err := Scan(reply, &m.Channel, &m.Data); err != nil {
- return err
- }
- return m
- case "pmessage":
- var pm PMessage
- if _, err := Scan(reply, &pm.Pattern, &pm.Channel, &pm.Data); err != nil {
- return err
- }
- return pm
- case "subscribe", "psubscribe", "unsubscribe", "punsubscribe":
- s := Subscription{Kind: kind}
- if _, err := Scan(reply, &s.Channel, &s.Count); err != nil {
- return err
- }
- return s
- case "pong":
- var p Pong
- if _, err := Scan(reply, &p.Data); err != nil {
- return err
- }
- return p
- }
- return errors.New("redigo: unknown pubsub notification")
-}
diff --git a/vendor/src/github.com/garyburd/redigo/redis/pubsub_test.go b/vendor/src/github.com/garyburd/redigo/redis/pubsub_test.go
deleted file mode 100644
index b955131..0000000
--- a/vendor/src/github.com/garyburd/redigo/redis/pubsub_test.go
+++ /dev/null
@@ -1,148 +0,0 @@
-// Copyright 2012 Gary Burd
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package redis_test
-
-import (
- "fmt"
- "reflect"
- "sync"
- "testing"
-
- "github.com/garyburd/redigo/redis"
-)
-
-func publish(channel, value interface{}) {
- c, err := dial()
- if err != nil {
- fmt.Println(err)
- return
- }
- defer c.Close()
- c.Do("PUBLISH", channel, value)
-}
-
-// Applications can receive pushed messages from one goroutine and manage subscriptions from another goroutine.
-func ExamplePubSubConn() {
- c, err := dial()
- if err != nil {
- fmt.Println(err)
- return
- }
- defer c.Close()
- var wg sync.WaitGroup
- wg.Add(2)
-
- psc := redis.PubSubConn{Conn: c}
-
- // This goroutine receives and prints pushed notifications from the server.
- // The goroutine exits when the connection is unsubscribed from all
- // channels or there is an error.
- go func() {
- defer wg.Done()
- for {
- switch n := psc.Receive().(type) {
- case redis.Message:
- fmt.Printf("Message: %s %s\n", n.Channel, n.Data)
- case redis.PMessage:
- fmt.Printf("PMessage: %s %s %s\n", n.Pattern, n.Channel, n.Data)
- case redis.Subscription:
- fmt.Printf("Subscription: %s %s %d\n", n.Kind, n.Channel, n.Count)
- if n.Count == 0 {
- return
- }
- case error:
- fmt.Printf("error: %v\n", n)
- return
- }
- }
- }()
-
- // This goroutine manages subscriptions for the connection.
- go func() {
- defer wg.Done()
-
- psc.Subscribe("example")
- psc.PSubscribe("p*")
-
- // The following function calls publish a message using another
- // connection to the Redis server.
- publish("example", "hello")
- publish("example", "world")
- publish("pexample", "foo")
- publish("pexample", "bar")
-
- // Unsubscribe from all connections. This will cause the receiving
- // goroutine to exit.
- psc.Unsubscribe()
- psc.PUnsubscribe()
- }()
-
- wg.Wait()
-
- // Output:
- // Subscription: subscribe example 1
- // Subscription: psubscribe p* 2
- // Message: example hello
- // Message: example world
- // PMessage: p* pexample foo
- // PMessage: p* pexample bar
- // Subscription: unsubscribe example 1
- // Subscription: punsubscribe p* 0
-}
-
-func expectPushed(t *testing.T, c redis.PubSubConn, message string, expected interface{}) {
- actual := c.Receive()
- if !reflect.DeepEqual(actual, expected) {
- t.Errorf("%s = %v, want %v", message, actual, expected)
- }
-}
-
-func TestPushed(t *testing.T) {
- pc, err := redis.DialDefaultServer()
- if err != nil {
- t.Fatalf("error connection to database, %v", err)
- }
- defer pc.Close()
-
- sc, err := redis.DialDefaultServer()
- if err != nil {
- t.Fatalf("error connection to database, %v", err)
- }
- defer sc.Close()
-
- c := redis.PubSubConn{Conn: sc}
-
- c.Subscribe("c1")
- expectPushed(t, c, "Subscribe(c1)", redis.Subscription{Kind: "subscribe", Channel: "c1", Count: 1})
- c.Subscribe("c2")
- expectPushed(t, c, "Subscribe(c2)", redis.Subscription{Kind: "subscribe", Channel: "c2", Count: 2})
- c.PSubscribe("p1")
- expectPushed(t, c, "PSubscribe(p1)", redis.Subscription{Kind: "psubscribe", Channel: "p1", Count: 3})
- c.PSubscribe("p2")
- expectPushed(t, c, "PSubscribe(p2)", redis.Subscription{Kind: "psubscribe", Channel: "p2", Count: 4})
- c.PUnsubscribe()
- expectPushed(t, c, "Punsubscribe(p1)", redis.Subscription{Kind: "punsubscribe", Channel: "p1", Count: 3})
- expectPushed(t, c, "Punsubscribe()", redis.Subscription{Kind: "punsubscribe", Channel: "p2", Count: 2})
-
- pc.Do("PUBLISH", "c1", "hello")
- expectPushed(t, c, "PUBLISH c1 hello", redis.Message{Channel: "c1", Data: []byte("hello")})
-
- c.Ping("hello")
- expectPushed(t, c, `Ping("hello")`, redis.Pong{Data: "hello"})
-
- c.Conn.Send("PING")
- c.Conn.Flush()
- expectPushed(t, c, `Send("PING")`, redis.Pong{})
-}
diff --git a/vendor/src/github.com/garyburd/redigo/redis/redis.go b/vendor/src/github.com/garyburd/redigo/redis/redis.go
deleted file mode 100644
index c90a48e..0000000
--- a/vendor/src/github.com/garyburd/redigo/redis/redis.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2012 Gary Burd
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package redis
-
-// Error represents an error returned in a command reply.
-type Error string
-
-func (err Error) Error() string { return string(err) }
-
-// Conn represents a connection to a Redis server.
-type Conn interface {
- // Close closes the connection.
- Close() error
-
- // Err returns a non-nil value if the connection is broken. The returned
- // value is either the first non-nil value returned from the underlying
- // network connection or a protocol parsing error. Applications should
- // close broken connections.
- Err() error
-
- // Do sends a command to the server and returns the received reply.
- Do(commandName string, args ...interface{}) (reply interface{}, err error)
-
- // Send writes the command to the client's output buffer.
- Send(commandName string, args ...interface{}) error
-
- // Flush flushes the output buffer to the Redis server.
- Flush() error
-
- // Receive receives a single reply from the Redis server
- Receive() (reply interface{}, err error)
-}
diff --git a/vendor/src/github.com/garyburd/redigo/redis/reply.go b/vendor/src/github.com/garyburd/redigo/redis/reply.go
deleted file mode 100644
index 5789614..0000000
--- a/vendor/src/github.com/garyburd/redigo/redis/reply.go
+++ /dev/null
@@ -1,393 +0,0 @@
-// Copyright 2012 Gary Burd
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package redis
-
-import (
- "errors"
- "fmt"
- "strconv"
-)
-
-// ErrNil indicates that a reply value is nil.
-var ErrNil = errors.New("redigo: nil returned")
-
-// Int is a helper that converts a command reply to an integer. If err is not
-// equal to nil, then Int returns 0, err. Otherwise, Int converts the
-// reply to an int as follows:
-//
-// Reply type Result
-// integer int(reply), nil
-// bulk string parsed reply, nil
-// nil 0, ErrNil
-// other 0, error
-func Int(reply interface{}, err error) (int, error) {
- if err != nil {
- return 0, err
- }
- switch reply := reply.(type) {
- case int64:
- x := int(reply)
- if int64(x) != reply {
- return 0, strconv.ErrRange
- }
- return x, nil
- case []byte:
- n, err := strconv.ParseInt(string(reply), 10, 0)
- return int(n), err
- case nil:
- return 0, ErrNil
- case Error:
- return 0, reply
- }
- return 0, fmt.Errorf("redigo: unexpected type for Int, got type %T", reply)
-}
-
-// Int64 is a helper that converts a command reply to 64 bit integer. If err is
-// not equal to nil, then Int returns 0, err. Otherwise, Int64 converts the
-// reply to an int64 as follows:
-//
-// Reply type Result
-// integer reply, nil
-// bulk string parsed reply, nil
-// nil 0, ErrNil
-// other 0, error
-func Int64(reply interface{}, err error) (int64, error) {
- if err != nil {
- return 0, err
- }
- switch reply := reply.(type) {
- case int64:
- return reply, nil
- case []byte:
- n, err := strconv.ParseInt(string(reply), 10, 64)
- return n, err
- case nil:
- return 0, ErrNil
- case Error:
- return 0, reply
- }
- return 0, fmt.Errorf("redigo: unexpected type for Int64, got type %T", reply)
-}
-
-var errNegativeInt = errors.New("redigo: unexpected value for Uint64")
-
-// Uint64 is a helper that converts a command reply to 64 bit integer. If err is
-// not equal to nil, then Int returns 0, err. Otherwise, Int64 converts the
-// reply to an int64 as follows:
-//
-// Reply type Result
-// integer reply, nil
-// bulk string parsed reply, nil
-// nil 0, ErrNil
-// other 0, error
-func Uint64(reply interface{}, err error) (uint64, error) {
- if err != nil {
- return 0, err
- }
- switch reply := reply.(type) {
- case int64:
- if reply < 0 {
- return 0, errNegativeInt
- }
- return uint64(reply), nil
- case []byte:
- n, err := strconv.ParseUint(string(reply), 10, 64)
- return n, err
- case nil:
- return 0, ErrNil
- case Error:
- return 0, reply
- }
- return 0, fmt.Errorf("redigo: unexpected type for Uint64, got type %T", reply)
-}
-
-// Float64 is a helper that converts a command reply to 64 bit float. If err is
-// not equal to nil, then Float64 returns 0, err. Otherwise, Float64 converts
-// the reply to an int as follows:
-//
-// Reply type Result
-// bulk string parsed reply, nil
-// nil 0, ErrNil
-// other 0, error
-func Float64(reply interface{}, err error) (float64, error) {
- if err != nil {
- return 0, err
- }
- switch reply := reply.(type) {
- case []byte:
- n, err := strconv.ParseFloat(string(reply), 64)
- return n, err
- case nil:
- return 0, ErrNil
- case Error:
- return 0, reply
- }
- return 0, fmt.Errorf("redigo: unexpected type for Float64, got type %T", reply)
-}
-
-// String is a helper that converts a command reply to a string. If err is not
-// equal to nil, then String returns "", err. Otherwise String converts the
-// reply to a string as follows:
-//
-// Reply type Result
-// bulk string string(reply), nil
-// simple string reply, nil
-// nil "", ErrNil
-// other "", error
-func String(reply interface{}, err error) (string, error) {
- if err != nil {
- return "", err
- }
- switch reply := reply.(type) {
- case []byte:
- return string(reply), nil
- case string:
- return reply, nil
- case nil:
- return "", ErrNil
- case Error:
- return "", reply
- }
- return "", fmt.Errorf("redigo: unexpected type for String, got type %T", reply)
-}
-
-// Bytes is a helper that converts a command reply to a slice of bytes. If err
-// is not equal to nil, then Bytes returns nil, err. Otherwise Bytes converts
-// the reply to a slice of bytes as follows:
-//
-// Reply type Result
-// bulk string reply, nil
-// simple string []byte(reply), nil
-// nil nil, ErrNil
-// other nil, error
-func Bytes(reply interface{}, err error) ([]byte, error) {
- if err != nil {
- return nil, err
- }
- switch reply := reply.(type) {
- case []byte:
- return reply, nil
- case string:
- return []byte(reply), nil
- case nil:
- return nil, ErrNil
- case Error:
- return nil, reply
- }
- return nil, fmt.Errorf("redigo: unexpected type for Bytes, got type %T", reply)
-}
-
-// Bool is a helper that converts a command reply to a boolean. If err is not
-// equal to nil, then Bool returns false, err. Otherwise Bool converts the
-// reply to boolean as follows:
-//
-// Reply type Result
-// integer value != 0, nil
-// bulk string strconv.ParseBool(reply)
-// nil false, ErrNil
-// other false, error
-func Bool(reply interface{}, err error) (bool, error) {
- if err != nil {
- return false, err
- }
- switch reply := reply.(type) {
- case int64:
- return reply != 0, nil
- case []byte:
- return strconv.ParseBool(string(reply))
- case nil:
- return false, ErrNil
- case Error:
- return false, reply
- }
- return false, fmt.Errorf("redigo: unexpected type for Bool, got type %T", reply)
-}
-
-// MultiBulk is a helper that converts an array command reply to a []interface{}.
-//
-// Deprecated: Use Values instead.
-func MultiBulk(reply interface{}, err error) ([]interface{}, error) { return Values(reply, err) }
-
-// Values is a helper that converts an array command reply to a []interface{}.
-// If err is not equal to nil, then Values returns nil, err. Otherwise, Values
-// converts the reply as follows:
-//
-// Reply type Result
-// array reply, nil
-// nil nil, ErrNil
-// other nil, error
-func Values(reply interface{}, err error) ([]interface{}, error) {
- if err != nil {
- return nil, err
- }
- switch reply := reply.(type) {
- case []interface{}:
- return reply, nil
- case nil:
- return nil, ErrNil
- case Error:
- return nil, reply
- }
- return nil, fmt.Errorf("redigo: unexpected type for Values, got type %T", reply)
-}
-
-// Strings is a helper that converts an array command reply to a []string. If
-// err is not equal to nil, then Strings returns nil, err. Nil array items are
-// converted to "" in the output slice. Strings returns an error if an array
-// item is not a bulk string or nil.
-func Strings(reply interface{}, err error) ([]string, error) {
- if err != nil {
- return nil, err
- }
- switch reply := reply.(type) {
- case []interface{}:
- result := make([]string, len(reply))
- for i := range reply {
- if reply[i] == nil {
- continue
- }
- p, ok := reply[i].([]byte)
- if !ok {
- return nil, fmt.Errorf("redigo: unexpected element type for Strings, got type %T", reply[i])
- }
- result[i] = string(p)
- }
- return result, nil
- case nil:
- return nil, ErrNil
- case Error:
- return nil, reply
- }
- return nil, fmt.Errorf("redigo: unexpected type for Strings, got type %T", reply)
-}
-
-// ByteSlices is a helper that converts an array command reply to a [][]byte.
-// If err is not equal to nil, then ByteSlices returns nil, err. Nil array
-// items are stay nil. ByteSlices returns an error if an array item is not a
-// bulk string or nil.
-func ByteSlices(reply interface{}, err error) ([][]byte, error) {
- if err != nil {
- return nil, err
- }
- switch reply := reply.(type) {
- case []interface{}:
- result := make([][]byte, len(reply))
- for i := range reply {
- if reply[i] == nil {
- continue
- }
- p, ok := reply[i].([]byte)
- if !ok {
- return nil, fmt.Errorf("redigo: unexpected element type for ByteSlices, got type %T", reply[i])
- }
- result[i] = p
- }
- return result, nil
- case nil:
- return nil, ErrNil
- case Error:
- return nil, reply
- }
- return nil, fmt.Errorf("redigo: unexpected type for ByteSlices, got type %T", reply)
-}
-
-// Ints is a helper that converts an array command reply to a []int. If
-// err is not equal to nil, then Ints returns nil, err.
-func Ints(reply interface{}, err error) ([]int, error) {
- var ints []int
- values, err := Values(reply, err)
- if err != nil {
- return ints, err
- }
- if err := ScanSlice(values, &ints); err != nil {
- return ints, err
- }
- return ints, nil
-}
-
-// StringMap is a helper that converts an array of strings (alternating key, value)
-// into a map[string]string. The HGETALL and CONFIG GET commands return replies in this format.
-// Requires an even number of values in result.
-func StringMap(result interface{}, err error) (map[string]string, error) {
- values, err := Values(result, err)
- if err != nil {
- return nil, err
- }
- if len(values)%2 != 0 {
- return nil, errors.New("redigo: StringMap expects even number of values result")
- }
- m := make(map[string]string, len(values)/2)
- for i := 0; i < len(values); i += 2 {
- key, okKey := values[i].([]byte)
- value, okValue := values[i+1].([]byte)
- if !okKey || !okValue {
- return nil, errors.New("redigo: ScanMap key not a bulk string value")
- }
- m[string(key)] = string(value)
- }
- return m, nil
-}
-
-// IntMap is a helper that converts an array of strings (alternating key, value)
-// into a map[string]int. The HGETALL commands return replies in this format.
-// Requires an even number of values in result.
-func IntMap(result interface{}, err error) (map[string]int, error) {
- values, err := Values(result, err)
- if err != nil {
- return nil, err
- }
- if len(values)%2 != 0 {
- return nil, errors.New("redigo: IntMap expects even number of values result")
- }
- m := make(map[string]int, len(values)/2)
- for i := 0; i < len(values); i += 2 {
- key, ok := values[i].([]byte)
- if !ok {
- return nil, errors.New("redigo: ScanMap key not a bulk string value")
- }
- value, err := Int(values[i+1], nil)
- if err != nil {
- return nil, err
- }
- m[string(key)] = value
- }
- return m, nil
-}
-
-// Int64Map is a helper that converts an array of strings (alternating key, value)
-// into a map[string]int64. The HGETALL commands return replies in this format.
-// Requires an even number of values in result.
-func Int64Map(result interface{}, err error) (map[string]int64, error) {
- values, err := Values(result, err)
- if err != nil {
- return nil, err
- }
- if len(values)%2 != 0 {
- return nil, errors.New("redigo: Int64Map expects even number of values result")
- }
- m := make(map[string]int64, len(values)/2)
- for i := 0; i < len(values); i += 2 {
- key, ok := values[i].([]byte)
- if !ok {
- return nil, errors.New("redigo: ScanMap key not a bulk string value")
- }
- value, err := Int64(values[i+1], nil)
- if err != nil {
- return nil, err
- }
- m[string(key)] = value
- }
- return m, nil
-}
diff --git a/vendor/src/github.com/garyburd/redigo/redis/reply_test.go b/vendor/src/github.com/garyburd/redigo/redis/reply_test.go
deleted file mode 100644
index 2c77486..0000000
--- a/vendor/src/github.com/garyburd/redigo/redis/reply_test.go
+++ /dev/null
@@ -1,179 +0,0 @@
-// Copyright 2012 Gary Burd
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package redis_test
-
-import (
- "fmt"
- "reflect"
- "testing"
-
- "github.com/garyburd/redigo/redis"
-)
-
-type valueError struct {
- v interface{}
- err error
-}
-
-func ve(v interface{}, err error) valueError {
- return valueError{v, err}
-}
-
-var replyTests = []struct {
- name interface{}
- actual valueError
- expected valueError
-}{
- {
- "ints([v1, v2])",
- ve(redis.Ints([]interface{}{[]byte("4"), []byte("5")}, nil)),
- ve([]int{4, 5}, nil),
- },
- {
- "ints(nil)",
- ve(redis.Ints(nil, nil)),
- ve([]int(nil), redis.ErrNil),
- },
- {
- "strings([v1, v2])",
- ve(redis.Strings([]interface{}{[]byte("v1"), []byte("v2")}, nil)),
- ve([]string{"v1", "v2"}, nil),
- },
- {
- "strings(nil)",
- ve(redis.Strings(nil, nil)),
- ve([]string(nil), redis.ErrNil),
- },
- {
- "byteslices([v1, v2])",
- ve(redis.ByteSlices([]interface{}{[]byte("v1"), []byte("v2")}, nil)),
- ve([][]byte{[]byte("v1"), []byte("v2")}, nil),
- },
- {
- "byteslices(nil)",
- ve(redis.ByteSlices(nil, nil)),
- ve([][]byte(nil), redis.ErrNil),
- },
- {
- "values([v1, v2])",
- ve(redis.Values([]interface{}{[]byte("v1"), []byte("v2")}, nil)),
- ve([]interface{}{[]byte("v1"), []byte("v2")}, nil),
- },
- {
- "values(nil)",
- ve(redis.Values(nil, nil)),
- ve([]interface{}(nil), redis.ErrNil),
- },
- {
- "float64(1.0)",
- ve(redis.Float64([]byte("1.0"), nil)),
- ve(float64(1.0), nil),
- },
- {
- "float64(nil)",
- ve(redis.Float64(nil, nil)),
- ve(float64(0.0), redis.ErrNil),
- },
- {
- "uint64(1)",
- ve(redis.Uint64(int64(1), nil)),
- ve(uint64(1), nil),
- },
- {
- "uint64(-1)",
- ve(redis.Uint64(int64(-1), nil)),
- ve(uint64(0), redis.ErrNegativeInt),
- },
-}
-
-func TestReply(t *testing.T) {
- for _, rt := range replyTests {
- if rt.actual.err != rt.expected.err {
- t.Errorf("%s returned err %v, want %v", rt.name, rt.actual.err, rt.expected.err)
- continue
- }
- if !reflect.DeepEqual(rt.actual.v, rt.expected.v) {
- t.Errorf("%s=%+v, want %+v", rt.name, rt.actual.v, rt.expected.v)
- }
- }
-}
-
-// dial wraps DialDefaultServer() with a more suitable function name for examples.
-func dial() (redis.Conn, error) {
- return redis.DialDefaultServer()
-}
-
-func ExampleBool() {
- c, err := dial()
- if err != nil {
- fmt.Println(err)
- return
- }
- defer c.Close()
-
- c.Do("SET", "foo", 1)
- exists, _ := redis.Bool(c.Do("EXISTS", "foo"))
- fmt.Printf("%#v\n", exists)
- // Output:
- // true
-}
-
-func ExampleInt() {
- c, err := dial()
- if err != nil {
- fmt.Println(err)
- return
- }
- defer c.Close()
-
- c.Do("SET", "k1", 1)
- n, _ := redis.Int(c.Do("GET", "k1"))
- fmt.Printf("%#v\n", n)
- n, _ = redis.Int(c.Do("INCR", "k1"))
- fmt.Printf("%#v\n", n)
- // Output:
- // 1
- // 2
-}
-
-func ExampleInts() {
- c, err := dial()
- if err != nil {
- fmt.Println(err)
- return
- }
- defer c.Close()
-
- c.Do("SADD", "set_with_integers", 4, 5, 6)
- ints, _ := redis.Ints(c.Do("SMEMBERS", "set_with_integers"))
- fmt.Printf("%#v\n", ints)
- // Output:
- // []int{4, 5, 6}
-}
-
-func ExampleString() {
- c, err := dial()
- if err != nil {
- fmt.Println(err)
- return
- }
- defer c.Close()
-
- c.Do("SET", "hello", "world")
- s, err := redis.String(c.Do("GET", "hello"))
- fmt.Printf("%#v\n", s)
- // Output:
- // "world"
-}
diff --git a/vendor/src/github.com/garyburd/redigo/redis/scan.go b/vendor/src/github.com/garyburd/redigo/redis/scan.go
deleted file mode 100644
index 962e94b..0000000
--- a/vendor/src/github.com/garyburd/redigo/redis/scan.go
+++ /dev/null
@@ -1,555 +0,0 @@
-// Copyright 2012 Gary Burd
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package redis
-
-import (
- "errors"
- "fmt"
- "reflect"
- "strconv"
- "strings"
- "sync"
-)
-
-func ensureLen(d reflect.Value, n int) {
- if n > d.Cap() {
- d.Set(reflect.MakeSlice(d.Type(), n, n))
- } else {
- d.SetLen(n)
- }
-}
-
-func cannotConvert(d reflect.Value, s interface{}) error {
- var sname string
- switch s.(type) {
- case string:
- sname = "Redis simple string"
- case Error:
- sname = "Redis error"
- case int64:
- sname = "Redis integer"
- case []byte:
- sname = "Redis bulk string"
- case []interface{}:
- sname = "Redis array"
- default:
- sname = reflect.TypeOf(s).String()
- }
- return fmt.Errorf("cannot convert from %s to %s", sname, d.Type())
-}
-
-func convertAssignBulkString(d reflect.Value, s []byte) (err error) {
- switch d.Type().Kind() {
- case reflect.Float32, reflect.Float64:
- var x float64
- x, err = strconv.ParseFloat(string(s), d.Type().Bits())
- d.SetFloat(x)
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- var x int64
- x, err = strconv.ParseInt(string(s), 10, d.Type().Bits())
- d.SetInt(x)
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- var x uint64
- x, err = strconv.ParseUint(string(s), 10, d.Type().Bits())
- d.SetUint(x)
- case reflect.Bool:
- var x bool
- x, err = strconv.ParseBool(string(s))
- d.SetBool(x)
- case reflect.String:
- d.SetString(string(s))
- case reflect.Slice:
- if d.Type().Elem().Kind() != reflect.Uint8 {
- err = cannotConvert(d, s)
- } else {
- d.SetBytes(s)
- }
- default:
- err = cannotConvert(d, s)
- }
- return
-}
-
-func convertAssignInt(d reflect.Value, s int64) (err error) {
- switch d.Type().Kind() {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- d.SetInt(s)
- if d.Int() != s {
- err = strconv.ErrRange
- d.SetInt(0)
- }
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- if s < 0 {
- err = strconv.ErrRange
- } else {
- x := uint64(s)
- d.SetUint(x)
- if d.Uint() != x {
- err = strconv.ErrRange
- d.SetUint(0)
- }
- }
- case reflect.Bool:
- d.SetBool(s != 0)
- default:
- err = cannotConvert(d, s)
- }
- return
-}
-
-func convertAssignValue(d reflect.Value, s interface{}) (err error) {
- switch s := s.(type) {
- case []byte:
- err = convertAssignBulkString(d, s)
- case int64:
- err = convertAssignInt(d, s)
- default:
- err = cannotConvert(d, s)
- }
- return err
-}
-
-func convertAssignArray(d reflect.Value, s []interface{}) error {
- if d.Type().Kind() != reflect.Slice {
- return cannotConvert(d, s)
- }
- ensureLen(d, len(s))
- for i := 0; i < len(s); i++ {
- if err := convertAssignValue(d.Index(i), s[i]); err != nil {
- return err
- }
- }
- return nil
-}
-
-func convertAssign(d interface{}, s interface{}) (err error) {
- // Handle the most common destination types using type switches and
- // fall back to reflection for all other types.
- switch s := s.(type) {
- case nil:
- // ingore
- case []byte:
- switch d := d.(type) {
- case *string:
- *d = string(s)
- case *int:
- *d, err = strconv.Atoi(string(s))
- case *bool:
- *d, err = strconv.ParseBool(string(s))
- case *[]byte:
- *d = s
- case *interface{}:
- *d = s
- case nil:
- // skip value
- default:
- if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr {
- err = cannotConvert(d, s)
- } else {
- err = convertAssignBulkString(d.Elem(), s)
- }
- }
- case int64:
- switch d := d.(type) {
- case *int:
- x := int(s)
- if int64(x) != s {
- err = strconv.ErrRange
- x = 0
- }
- *d = x
- case *bool:
- *d = s != 0
- case *interface{}:
- *d = s
- case nil:
- // skip value
- default:
- if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr {
- err = cannotConvert(d, s)
- } else {
- err = convertAssignInt(d.Elem(), s)
- }
- }
- case string:
- switch d := d.(type) {
- case *string:
- *d = string(s)
- default:
- err = cannotConvert(reflect.ValueOf(d), s)
- }
- case []interface{}:
- switch d := d.(type) {
- case *[]interface{}:
- *d = s
- case *interface{}:
- *d = s
- case nil:
- // skip value
- default:
- if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr {
- err = cannotConvert(d, s)
- } else {
- err = convertAssignArray(d.Elem(), s)
- }
- }
- case Error:
- err = s
- default:
- err = cannotConvert(reflect.ValueOf(d), s)
- }
- return
-}
-
-// Scan copies from src to the values pointed at by dest.
-//
-// The values pointed at by dest must be an integer, float, boolean, string,
-// []byte, interface{} or slices of these types. Scan uses the standard strconv
-// package to convert bulk strings to numeric and boolean types.
-//
-// If a dest value is nil, then the corresponding src value is skipped.
-//
-// If a src element is nil, then the corresponding dest value is not modified.
-//
-// To enable easy use of Scan in a loop, Scan returns the slice of src
-// following the copied values.
-func Scan(src []interface{}, dest ...interface{}) ([]interface{}, error) {
- if len(src) < len(dest) {
- return nil, errors.New("redigo.Scan: array short")
- }
- var err error
- for i, d := range dest {
- err = convertAssign(d, src[i])
- if err != nil {
- err = fmt.Errorf("redigo.Scan: cannot assign to dest %d: %v", i, err)
- break
- }
- }
- return src[len(dest):], err
-}
-
-type fieldSpec struct {
- name string
- index []int
- omitEmpty bool
-}
-
-type structSpec struct {
- m map[string]*fieldSpec
- l []*fieldSpec
-}
-
-func (ss *structSpec) fieldSpec(name []byte) *fieldSpec {
- return ss.m[string(name)]
-}
-
-func compileStructSpec(t reflect.Type, depth map[string]int, index []int, ss *structSpec) {
- for i := 0; i < t.NumField(); i++ {
- f := t.Field(i)
- switch {
- case f.PkgPath != "" && !f.Anonymous:
- // Ignore unexported fields.
- case f.Anonymous:
- // TODO: Handle pointers. Requires change to decoder and
- // protection against infinite recursion.
- if f.Type.Kind() == reflect.Struct {
- compileStructSpec(f.Type, depth, append(index, i), ss)
- }
- default:
- fs := &fieldSpec{name: f.Name}
- tag := f.Tag.Get("redis")
- p := strings.Split(tag, ",")
- if len(p) > 0 {
- if p[0] == "-" {
- continue
- }
- if len(p[0]) > 0 {
- fs.name = p[0]
- }
- for _, s := range p[1:] {
- switch s {
- case "omitempty":
- fs.omitEmpty = true
- default:
- panic(fmt.Errorf("redigo: unknown field tag %s for type %s", s, t.Name()))
- }
- }
- }
- d, found := depth[fs.name]
- if !found {
- d = 1 << 30
- }
- switch {
- case len(index) == d:
- // At same depth, remove from result.
- delete(ss.m, fs.name)
- j := 0
- for i := 0; i < len(ss.l); i++ {
- if fs.name != ss.l[i].name {
- ss.l[j] = ss.l[i]
- j += 1
- }
- }
- ss.l = ss.l[:j]
- case len(index) < d:
- fs.index = make([]int, len(index)+1)
- copy(fs.index, index)
- fs.index[len(index)] = i
- depth[fs.name] = len(index)
- ss.m[fs.name] = fs
- ss.l = append(ss.l, fs)
- }
- }
- }
-}
-
-var (
- structSpecMutex sync.RWMutex
- structSpecCache = make(map[reflect.Type]*structSpec)
- defaultFieldSpec = &fieldSpec{}
-)
-
-func structSpecForType(t reflect.Type) *structSpec {
-
- structSpecMutex.RLock()
- ss, found := structSpecCache[t]
- structSpecMutex.RUnlock()
- if found {
- return ss
- }
-
- structSpecMutex.Lock()
- defer structSpecMutex.Unlock()
- ss, found = structSpecCache[t]
- if found {
- return ss
- }
-
- ss = &structSpec{m: make(map[string]*fieldSpec)}
- compileStructSpec(t, make(map[string]int), nil, ss)
- structSpecCache[t] = ss
- return ss
-}
-
-var errScanStructValue = errors.New("redigo.ScanStruct: value must be non-nil pointer to a struct")
-
-// ScanStruct scans alternating names and values from src to a struct. The
-// HGETALL and CONFIG GET commands return replies in this format.
-//
-// ScanStruct uses exported field names to match values in the response. Use
-// 'redis' field tag to override the name:
-//
-// Field int `redis:"myName"`
-//
-// Fields with the tag redis:"-" are ignored.
-//
-// Integer, float, boolean, string and []byte fields are supported. Scan uses the
-// standard strconv package to convert bulk string values to numeric and
-// boolean types.
-//
-// If a src element is nil, then the corresponding field is not modified.
-func ScanStruct(src []interface{}, dest interface{}) error {
- d := reflect.ValueOf(dest)
- if d.Kind() != reflect.Ptr || d.IsNil() {
- return errScanStructValue
- }
- d = d.Elem()
- if d.Kind() != reflect.Struct {
- return errScanStructValue
- }
- ss := structSpecForType(d.Type())
-
- if len(src)%2 != 0 {
- return errors.New("redigo.ScanStruct: number of values not a multiple of 2")
- }
-
- for i := 0; i < len(src); i += 2 {
- s := src[i+1]
- if s == nil {
- continue
- }
- name, ok := src[i].([]byte)
- if !ok {
- return fmt.Errorf("redigo.ScanStruct: key %d not a bulk string value", i)
- }
- fs := ss.fieldSpec(name)
- if fs == nil {
- continue
- }
- if err := convertAssignValue(d.FieldByIndex(fs.index), s); err != nil {
- return fmt.Errorf("redigo.ScanStruct: cannot assign field %s: %v", fs.name, err)
- }
- }
- return nil
-}
-
-var (
- errScanSliceValue = errors.New("redigo.ScanSlice: dest must be non-nil pointer to a struct")
-)
-
-// ScanSlice scans src to the slice pointed to by dest. The elements the dest
-// slice must be integer, float, boolean, string, struct or pointer to struct
-// values.
-//
-// Struct fields must be integer, float, boolean or string values. All struct
-// fields are used unless a subset is specified using fieldNames.
-func ScanSlice(src []interface{}, dest interface{}, fieldNames ...string) error {
- d := reflect.ValueOf(dest)
- if d.Kind() != reflect.Ptr || d.IsNil() {
- return errScanSliceValue
- }
- d = d.Elem()
- if d.Kind() != reflect.Slice {
- return errScanSliceValue
- }
-
- isPtr := false
- t := d.Type().Elem()
- if t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct {
- isPtr = true
- t = t.Elem()
- }
-
- if t.Kind() != reflect.Struct {
- ensureLen(d, len(src))
- for i, s := range src {
- if s == nil {
- continue
- }
- if err := convertAssignValue(d.Index(i), s); err != nil {
- return fmt.Errorf("redigo.ScanSlice: cannot assign element %d: %v", i, err)
- }
- }
- return nil
- }
-
- ss := structSpecForType(t)
- fss := ss.l
- if len(fieldNames) > 0 {
- fss = make([]*fieldSpec, len(fieldNames))
- for i, name := range fieldNames {
- fss[i] = ss.m[name]
- if fss[i] == nil {
- return fmt.Errorf("redigo.ScanSlice: ScanSlice bad field name %s", name)
- }
- }
- }
-
- if len(fss) == 0 {
- return errors.New("redigo.ScanSlice: no struct fields")
- }
-
- n := len(src) / len(fss)
- if n*len(fss) != len(src) {
- return errors.New("redigo.ScanSlice: length not a multiple of struct field count")
- }
-
- ensureLen(d, n)
- for i := 0; i < n; i++ {
- d := d.Index(i)
- if isPtr {
- if d.IsNil() {
- d.Set(reflect.New(t))
- }
- d = d.Elem()
- }
- for j, fs := range fss {
- s := src[i*len(fss)+j]
- if s == nil {
- continue
- }
- if err := convertAssignValue(d.FieldByIndex(fs.index), s); err != nil {
- return fmt.Errorf("redigo.ScanSlice: cannot assign element %d to field %s: %v", i*len(fss)+j, fs.name, err)
- }
- }
- }
- return nil
-}
-
-// Args is a helper for constructing command arguments from structured values.
-type Args []interface{}
-
-// Add returns the result of appending value to args.
-func (args Args) Add(value ...interface{}) Args {
- return append(args, value...)
-}
-
-// AddFlat returns the result of appending the flattened value of v to args.
-//
-// Maps are flattened by appending the alternating keys and map values to args.
-//
-// Slices are flattened by appending the slice elements to args.
-//
-// Structs are flattened by appending the alternating names and values of
-// exported fields to args. If v is a nil struct pointer, then nothing is
-// appended. The 'redis' field tag overrides struct field names. See ScanStruct
-// for more information on the use of the 'redis' field tag.
-//
-// Other types are appended to args as is.
-func (args Args) AddFlat(v interface{}) Args {
- rv := reflect.ValueOf(v)
- switch rv.Kind() {
- case reflect.Struct:
- args = flattenStruct(args, rv)
- case reflect.Slice:
- for i := 0; i < rv.Len(); i++ {
- args = append(args, rv.Index(i).Interface())
- }
- case reflect.Map:
- for _, k := range rv.MapKeys() {
- args = append(args, k.Interface(), rv.MapIndex(k).Interface())
- }
- case reflect.Ptr:
- if rv.Type().Elem().Kind() == reflect.Struct {
- if !rv.IsNil() {
- args = flattenStruct(args, rv.Elem())
- }
- } else {
- args = append(args, v)
- }
- default:
- args = append(args, v)
- }
- return args
-}
-
-func flattenStruct(args Args, v reflect.Value) Args {
- ss := structSpecForType(v.Type())
- for _, fs := range ss.l {
- fv := v.FieldByIndex(fs.index)
- if fs.omitEmpty {
- var empty = false
- switch fv.Kind() {
- case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
- empty = fv.Len() == 0
- case reflect.Bool:
- empty = !fv.Bool()
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- empty = fv.Int() == 0
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- empty = fv.Uint() == 0
- case reflect.Float32, reflect.Float64:
- empty = fv.Float() == 0
- case reflect.Interface, reflect.Ptr:
- empty = fv.IsNil()
- }
- if empty {
- continue
- }
- }
- args = append(args, fs.name, fv.Interface())
- }
- return args
-}
diff --git a/vendor/src/github.com/garyburd/redigo/redis/scan_test.go b/vendor/src/github.com/garyburd/redigo/redis/scan_test.go
deleted file mode 100644
index d364dff..0000000
--- a/vendor/src/github.com/garyburd/redigo/redis/scan_test.go
+++ /dev/null
@@ -1,440 +0,0 @@
-// Copyright 2012 Gary Burd
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package redis_test
-
-import (
- "fmt"
- "math"
- "reflect"
- "testing"
-
- "github.com/garyburd/redigo/redis"
-)
-
-var scanConversionTests = []struct {
- src interface{}
- dest interface{}
-}{
- {[]byte("-inf"), math.Inf(-1)},
- {[]byte("+inf"), math.Inf(1)},
- {[]byte("0"), float64(0)},
- {[]byte("3.14159"), float64(3.14159)},
- {[]byte("3.14"), float32(3.14)},
- {[]byte("-100"), int(-100)},
- {[]byte("101"), int(101)},
- {int64(102), int(102)},
- {[]byte("103"), uint(103)},
- {int64(104), uint(104)},
- {[]byte("105"), int8(105)},
- {int64(106), int8(106)},
- {[]byte("107"), uint8(107)},
- {int64(108), uint8(108)},
- {[]byte("0"), false},
- {int64(0), false},
- {[]byte("f"), false},
- {[]byte("1"), true},
- {int64(1), true},
- {[]byte("t"), true},
- {"hello", "hello"},
- {[]byte("hello"), "hello"},
- {[]byte("world"), []byte("world")},
- {[]interface{}{[]byte("foo")}, []interface{}{[]byte("foo")}},
- {[]interface{}{[]byte("foo")}, []string{"foo"}},
- {[]interface{}{[]byte("hello"), []byte("world")}, []string{"hello", "world"}},
- {[]interface{}{[]byte("bar")}, [][]byte{[]byte("bar")}},
- {[]interface{}{[]byte("1")}, []int{1}},
- {[]interface{}{[]byte("1"), []byte("2")}, []int{1, 2}},
- {[]interface{}{[]byte("1"), []byte("2")}, []float64{1, 2}},
- {[]interface{}{[]byte("1")}, []byte{1}},
- {[]interface{}{[]byte("1")}, []bool{true}},
-}
-
-func TestScanConversion(t *testing.T) {
- for _, tt := range scanConversionTests {
- values := []interface{}{tt.src}
- dest := reflect.New(reflect.TypeOf(tt.dest))
- values, err := redis.Scan(values, dest.Interface())
- if err != nil {
- t.Errorf("Scan(%v) returned error %v", tt, err)
- continue
- }
- if !reflect.DeepEqual(tt.dest, dest.Elem().Interface()) {
- t.Errorf("Scan(%v) returned %v, want %v", tt, dest.Elem().Interface(), tt.dest)
- }
- }
-}
-
-var scanConversionErrorTests = []struct {
- src interface{}
- dest interface{}
-}{
- {[]byte("1234"), byte(0)},
- {int64(1234), byte(0)},
- {[]byte("-1"), byte(0)},
- {int64(-1), byte(0)},
- {[]byte("junk"), false},
- {redis.Error("blah"), false},
-}
-
-func TestScanConversionError(t *testing.T) {
- for _, tt := range scanConversionErrorTests {
- values := []interface{}{tt.src}
- dest := reflect.New(reflect.TypeOf(tt.dest))
- values, err := redis.Scan(values, dest.Interface())
- if err == nil {
- t.Errorf("Scan(%v) did not return error", tt)
- }
- }
-}
-
-func ExampleScan() {
- c, err := dial()
- if err != nil {
- fmt.Println(err)
- return
- }
- defer c.Close()
-
- c.Send("HMSET", "album:1", "title", "Red", "rating", 5)
- c.Send("HMSET", "album:2", "title", "Earthbound", "rating", 1)
- c.Send("HMSET", "album:3", "title", "Beat")
- c.Send("LPUSH", "albums", "1")
- c.Send("LPUSH", "albums", "2")
- c.Send("LPUSH", "albums", "3")
- values, err := redis.Values(c.Do("SORT", "albums",
- "BY", "album:*->rating",
- "GET", "album:*->title",
- "GET", "album:*->rating"))
- if err != nil {
- fmt.Println(err)
- return
- }
-
- for len(values) > 0 {
- var title string
- rating := -1 // initialize to illegal value to detect nil.
- values, err = redis.Scan(values, &title, &rating)
- if err != nil {
- fmt.Println(err)
- return
- }
- if rating == -1 {
- fmt.Println(title, "not-rated")
- } else {
- fmt.Println(title, rating)
- }
- }
- // Output:
- // Beat not-rated
- // Earthbound 1
- // Red 5
-}
-
-type s0 struct {
- X int
- Y int `redis:"y"`
- Bt bool
-}
-
-type s1 struct {
- X int `redis:"-"`
- I int `redis:"i"`
- U uint `redis:"u"`
- S string `redis:"s"`
- P []byte `redis:"p"`
- B bool `redis:"b"`
- Bt bool
- Bf bool
- s0
-}
-
-var scanStructTests = []struct {
- title string
- reply []string
- value interface{}
-}{
- {"basic",
- []string{"i", "-1234", "u", "5678", "s", "hello", "p", "world", "b", "t", "Bt", "1", "Bf", "0", "X", "123", "y", "456"},
- &s1{I: -1234, U: 5678, S: "hello", P: []byte("world"), B: true, Bt: true, Bf: false, s0: s0{X: 123, Y: 456}},
- },
-}
-
-func TestScanStruct(t *testing.T) {
- for _, tt := range scanStructTests {
-
- var reply []interface{}
- for _, v := range tt.reply {
- reply = append(reply, []byte(v))
- }
-
- value := reflect.New(reflect.ValueOf(tt.value).Type().Elem())
-
- if err := redis.ScanStruct(reply, value.Interface()); err != nil {
- t.Fatalf("ScanStruct(%s) returned error %v", tt.title, err)
- }
-
- if !reflect.DeepEqual(value.Interface(), tt.value) {
- t.Fatalf("ScanStruct(%s) returned %v, want %v", tt.title, value.Interface(), tt.value)
- }
- }
-}
-
-func TestBadScanStructArgs(t *testing.T) {
- x := []interface{}{"A", "b"}
- test := func(v interface{}) {
- if err := redis.ScanStruct(x, v); err == nil {
- t.Errorf("Expect error for ScanStruct(%T, %T)", x, v)
- }
- }
-
- test(nil)
-
- var v0 *struct{}
- test(v0)
-
- var v1 int
- test(&v1)
-
- x = x[:1]
- v2 := struct{ A string }{}
- test(&v2)
-}
-
-var scanSliceTests = []struct {
- src []interface{}
- fieldNames []string
- ok bool
- dest interface{}
-}{
- {
- []interface{}{[]byte("1"), nil, []byte("-1")},
- nil,
- true,
- []int{1, 0, -1},
- },
- {
- []interface{}{[]byte("1"), nil, []byte("2")},
- nil,
- true,
- []uint{1, 0, 2},
- },
- {
- []interface{}{[]byte("-1")},
- nil,
- false,
- []uint{1},
- },
- {
- []interface{}{[]byte("hello"), nil, []byte("world")},
- nil,
- true,
- [][]byte{[]byte("hello"), nil, []byte("world")},
- },
- {
- []interface{}{[]byte("hello"), nil, []byte("world")},
- nil,
- true,
- []string{"hello", "", "world"},
- },
- {
- []interface{}{[]byte("a1"), []byte("b1"), []byte("a2"), []byte("b2")},
- nil,
- true,
- []struct{ A, B string }{{"a1", "b1"}, {"a2", "b2"}},
- },
- {
- []interface{}{[]byte("a1"), []byte("b1")},
- nil,
- false,
- []struct{ A, B, C string }{{"a1", "b1", ""}},
- },
- {
- []interface{}{[]byte("a1"), []byte("b1"), []byte("a2"), []byte("b2")},
- nil,
- true,
- []*struct{ A, B string }{{"a1", "b1"}, {"a2", "b2"}},
- },
- {
- []interface{}{[]byte("a1"), []byte("b1"), []byte("a2"), []byte("b2")},
- []string{"A", "B"},
- true,
- []struct{ A, C, B string }{{"a1", "", "b1"}, {"a2", "", "b2"}},
- },
- {
- []interface{}{[]byte("a1"), []byte("b1"), []byte("a2"), []byte("b2")},
- nil,
- false,
- []struct{}{},
- },
-}
-
-func TestScanSlice(t *testing.T) {
- for _, tt := range scanSliceTests {
-
- typ := reflect.ValueOf(tt.dest).Type()
- dest := reflect.New(typ)
-
- err := redis.ScanSlice(tt.src, dest.Interface(), tt.fieldNames...)
- if tt.ok != (err == nil) {
- t.Errorf("ScanSlice(%v, []%s, %v) returned error %v", tt.src, typ, tt.fieldNames, err)
- continue
- }
- if tt.ok && !reflect.DeepEqual(dest.Elem().Interface(), tt.dest) {
- t.Errorf("ScanSlice(src, []%s) returned %#v, want %#v", typ, dest.Elem().Interface(), tt.dest)
- }
- }
-}
-
-func ExampleScanSlice() {
- c, err := dial()
- if err != nil {
- fmt.Println(err)
- return
- }
- defer c.Close()
-
- c.Send("HMSET", "album:1", "title", "Red", "rating", 5)
- c.Send("HMSET", "album:2", "title", "Earthbound", "rating", 1)
- c.Send("HMSET", "album:3", "title", "Beat", "rating", 4)
- c.Send("LPUSH", "albums", "1")
- c.Send("LPUSH", "albums", "2")
- c.Send("LPUSH", "albums", "3")
- values, err := redis.Values(c.Do("SORT", "albums",
- "BY", "album:*->rating",
- "GET", "album:*->title",
- "GET", "album:*->rating"))
- if err != nil {
- fmt.Println(err)
- return
- }
-
- var albums []struct {
- Title string
- Rating int
- }
- if err := redis.ScanSlice(values, &albums); err != nil {
- fmt.Println(err)
- return
- }
- fmt.Printf("%v\n", albums)
- // Output:
- // [{Earthbound 1} {Beat 4} {Red 5}]
-}
-
-var argsTests = []struct {
- title string
- actual redis.Args
- expected redis.Args
-}{
- {"struct ptr",
- redis.Args{}.AddFlat(&struct {
- I int `redis:"i"`
- U uint `redis:"u"`
- S string `redis:"s"`
- P []byte `redis:"p"`
- M map[string]string `redis:"m"`
- Bt bool
- Bf bool
- }{
- -1234, 5678, "hello", []byte("world"), map[string]string{"hello": "world"}, true, false,
- }),
- redis.Args{"i", int(-1234), "u", uint(5678), "s", "hello", "p", []byte("world"), "m", map[string]string{"hello": "world"}, "Bt", true, "Bf", false},
- },
- {"struct",
- redis.Args{}.AddFlat(struct{ I int }{123}),
- redis.Args{"I", 123},
- },
- {"slice",
- redis.Args{}.Add(1).AddFlat([]string{"a", "b", "c"}).Add(2),
- redis.Args{1, "a", "b", "c", 2},
- },
- {"struct omitempty",
- redis.Args{}.AddFlat(&struct {
- I int `redis:"i,omitempty"`
- U uint `redis:"u,omitempty"`
- S string `redis:"s,omitempty"`
- P []byte `redis:"p,omitempty"`
- M map[string]string `redis:"m,omitempty"`
- Bt bool `redis:"Bt,omitempty"`
- Bf bool `redis:"Bf,omitempty"`
- }{
- 0, 0, "", []byte{}, map[string]string{}, true, false,
- }),
- redis.Args{"Bt", true},
- },
-}
-
-func TestArgs(t *testing.T) {
- for _, tt := range argsTests {
- if !reflect.DeepEqual(tt.actual, tt.expected) {
- t.Fatalf("%s is %v, want %v", tt.title, tt.actual, tt.expected)
- }
- }
-}
-
-func ExampleArgs() {
- c, err := dial()
- if err != nil {
- fmt.Println(err)
- return
- }
- defer c.Close()
-
- var p1, p2 struct {
- Title string `redis:"title"`
- Author string `redis:"author"`
- Body string `redis:"body"`
- }
-
- p1.Title = "Example"
- p1.Author = "Gary"
- p1.Body = "Hello"
-
- if _, err := c.Do("HMSET", redis.Args{}.Add("id1").AddFlat(&p1)...); err != nil {
- fmt.Println(err)
- return
- }
-
- m := map[string]string{
- "title": "Example2",
- "author": "Steve",
- "body": "Map",
- }
-
- if _, err := c.Do("HMSET", redis.Args{}.Add("id2").AddFlat(m)...); err != nil {
- fmt.Println(err)
- return
- }
-
- for _, id := range []string{"id1", "id2"} {
-
- v, err := redis.Values(c.Do("HGETALL", id))
- if err != nil {
- fmt.Println(err)
- return
- }
-
- if err := redis.ScanStruct(v, &p2); err != nil {
- fmt.Println(err)
- return
- }
-
- fmt.Printf("%+v\n", p2)
- }
-
- // Output:
- // {Title:Example Author:Gary Body:Hello}
- // {Title:Example2 Author:Steve Body:Map}
-}
diff --git a/vendor/src/github.com/garyburd/redigo/redis/script.go b/vendor/src/github.com/garyburd/redigo/redis/script.go
deleted file mode 100644
index 78605a9..0000000
--- a/vendor/src/github.com/garyburd/redigo/redis/script.go
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2012 Gary Burd
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package redis
-
-import (
- "crypto/sha1"
- "encoding/hex"
- "io"
- "strings"
-)
-
-// Script encapsulates the source, hash and key count for a Lua script. See
-// http://redis.io/commands/eval for information on scripts in Redis.
-type Script struct {
- keyCount int
- src string
- hash string
-}
-
-// NewScript returns a new script object. If keyCount is greater than or equal
-// to zero, then the count is automatically inserted in the EVAL command
-// argument list. If keyCount is less than zero, then the application supplies
-// the count as the first value in the keysAndArgs argument to the Do, Send and
-// SendHash methods.
-func NewScript(keyCount int, src string) *Script {
- h := sha1.New()
- io.WriteString(h, src)
- return &Script{keyCount, src, hex.EncodeToString(h.Sum(nil))}
-}
-
-func (s *Script) args(spec string, keysAndArgs []interface{}) []interface{} {
- var args []interface{}
- if s.keyCount < 0 {
- args = make([]interface{}, 1+len(keysAndArgs))
- args[0] = spec
- copy(args[1:], keysAndArgs)
- } else {
- args = make([]interface{}, 2+len(keysAndArgs))
- args[0] = spec
- args[1] = s.keyCount
- copy(args[2:], keysAndArgs)
- }
- return args
-}
-
-// Do evaluates the script. Under the covers, Do optimistically evaluates the
-// script using the EVALSHA command. If the command fails because the script is
-// not loaded, then Do evaluates the script using the EVAL command (thus
-// causing the script to load).
-func (s *Script) Do(c Conn, keysAndArgs ...interface{}) (interface{}, error) {
- v, err := c.Do("EVALSHA", s.args(s.hash, keysAndArgs)...)
- if e, ok := err.(Error); ok && strings.HasPrefix(string(e), "NOSCRIPT ") {
- v, err = c.Do("EVAL", s.args(s.src, keysAndArgs)...)
- }
- return v, err
-}
-
-// SendHash evaluates the script without waiting for the reply. The script is
-// evaluated with the EVALSHA command. The application must ensure that the
-// script is loaded by a previous call to Send, Do or Load methods.
-func (s *Script) SendHash(c Conn, keysAndArgs ...interface{}) error {
- return c.Send("EVALSHA", s.args(s.hash, keysAndArgs)...)
-}
-
-// Send evaluates the script without waiting for the reply.
-func (s *Script) Send(c Conn, keysAndArgs ...interface{}) error {
- return c.Send("EVAL", s.args(s.src, keysAndArgs)...)
-}
-
-// Load loads the script without evaluating it.
-func (s *Script) Load(c Conn) error {
- _, err := c.Do("SCRIPT", "LOAD", s.src)
- return err
-}
diff --git a/vendor/src/github.com/garyburd/redigo/redis/script_test.go b/vendor/src/github.com/garyburd/redigo/redis/script_test.go
deleted file mode 100644
index af28241..0000000
--- a/vendor/src/github.com/garyburd/redigo/redis/script_test.go
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright 2012 Gary Burd
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package redis_test
-
-import (
- "fmt"
- "reflect"
- "testing"
- "time"
-
- "github.com/garyburd/redigo/redis"
-)
-
-var (
- // These variables are declared at package level to remove distracting
- // details from the examples.
- c redis.Conn
- reply interface{}
- err error
-)
-
-func ExampleScript() {
- // Initialize a package-level variable with a script.
- var getScript = redis.NewScript(1, `return redis.call('get', KEYS[1])`)
-
- // In a function, use the script Do method to evaluate the script. The Do
- // method optimistically uses the EVALSHA command. If the script is not
- // loaded, then the Do method falls back to the EVAL command.
- reply, err = getScript.Do(c, "foo")
-}
-
-func TestScript(t *testing.T) {
- c, err := redis.DialDefaultServer()
- if err != nil {
- t.Fatalf("error connection to database, %v", err)
- }
- defer c.Close()
-
- // To test fall back in Do, we make script unique by adding comment with current time.
- script := fmt.Sprintf("--%d\nreturn {KEYS[1],KEYS[2],ARGV[1],ARGV[2]}", time.Now().UnixNano())
- s := redis.NewScript(2, script)
- reply := []interface{}{[]byte("key1"), []byte("key2"), []byte("arg1"), []byte("arg2")}
-
- v, err := s.Do(c, "key1", "key2", "arg1", "arg2")
- if err != nil {
- t.Errorf("s.Do(c, ...) returned %v", err)
- }
-
- if !reflect.DeepEqual(v, reply) {
- t.Errorf("s.Do(c, ..); = %v, want %v", v, reply)
- }
-
- err = s.Load(c)
- if err != nil {
- t.Errorf("s.Load(c) returned %v", err)
- }
-
- err = s.SendHash(c, "key1", "key2", "arg1", "arg2")
- if err != nil {
- t.Errorf("s.SendHash(c, ...) returned %v", err)
- }
-
- err = c.Flush()
- if err != nil {
- t.Errorf("c.Flush() returned %v", err)
- }
-
- v, err = c.Receive()
- if !reflect.DeepEqual(v, reply) {
- t.Errorf("s.SendHash(c, ..); c.Receive() = %v, want %v", v, reply)
- }
-
- err = s.Send(c, "key1", "key2", "arg1", "arg2")
- if err != nil {
- t.Errorf("s.Send(c, ...) returned %v", err)
- }
-
- err = c.Flush()
- if err != nil {
- t.Errorf("c.Flush() returned %v", err)
- }
-
- v, err = c.Receive()
- if !reflect.DeepEqual(v, reply) {
- t.Errorf("s.Send(c, ..); c.Receive() = %v, want %v", v, reply)
- }
-
-}
diff --git a/vendor/src/github.com/garyburd/redigo/redis/test_test.go b/vendor/src/github.com/garyburd/redigo/redis/test_test.go
deleted file mode 100644
index 7240fa1..0000000
--- a/vendor/src/github.com/garyburd/redigo/redis/test_test.go
+++ /dev/null
@@ -1,177 +0,0 @@
-// Copyright 2012 Gary Burd
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package redis
-
-import (
- "bufio"
- "errors"
- "flag"
- "fmt"
- "io"
- "io/ioutil"
- "os"
- "os/exec"
- "strconv"
- "strings"
- "sync"
- "testing"
- "time"
-)
-
-func SetNowFunc(f func() time.Time) {
- nowFunc = f
-}
-
-var (
- ErrNegativeInt = errNegativeInt
-
- serverPath = flag.String("redis-server", "redis-server", "Path to redis server binary")
- serverBasePort = flag.Int("redis-port", 16379, "Beginning of port range for test servers")
- serverLogName = flag.String("redis-log", "", "Write Redis server logs to `filename`")
- serverLog = ioutil.Discard
-
- defaultServerMu sync.Mutex
- defaultServer *Server
- defaultServerErr error
-)
-
-type Server struct {
- name string
- cmd *exec.Cmd
- done chan struct{}
-}
-
-func NewServer(name string, args ...string) (*Server, error) {
- s := &Server{
- name: name,
- cmd: exec.Command(*serverPath, args...),
- done: make(chan struct{}),
- }
-
- r, err := s.cmd.StdoutPipe()
- if err != nil {
- return nil, err
- }
-
- err = s.cmd.Start()
- if err != nil {
- return nil, err
- }
-
- ready := make(chan error, 1)
- go s.watch(r, ready)
-
- select {
- case err = <-ready:
- case <-time.After(time.Second * 10):
- err = errors.New("timeout waiting for server to start")
- }
-
- if err != nil {
- s.Stop()
- return nil, err
- }
-
- return s, nil
-}
-
-func (s *Server) watch(r io.Reader, ready chan error) {
- fmt.Fprintf(serverLog, "%d START %s \n", s.cmd.Process.Pid, s.name)
- var listening bool
- var text string
- scn := bufio.NewScanner(r)
- for scn.Scan() {
- text = scn.Text()
- fmt.Fprintf(serverLog, "%s\n", text)
- if !listening {
- if strings.Contains(text, "The server is now ready to accept connections on port") {
- listening = true
- ready <- nil
- }
- }
- }
- if !listening {
- ready <- fmt.Errorf("server exited: %s", text)
- }
- s.cmd.Wait()
- fmt.Fprintf(serverLog, "%d STOP %s \n", s.cmd.Process.Pid, s.name)
- close(s.done)
-}
-
-func (s *Server) Stop() {
- s.cmd.Process.Signal(os.Interrupt)
- <-s.done
-}
-
-// stopDefaultServer stops the server created by DialDefaultServer.
-func stopDefaultServer() {
- defaultServerMu.Lock()
- defer defaultServerMu.Unlock()
- if defaultServer != nil {
- defaultServer.Stop()
- defaultServer = nil
- }
-}
-
-// startDefaultServer starts the default server if not already running.
-func startDefaultServer() error {
- defaultServerMu.Lock()
- defer defaultServerMu.Unlock()
- if defaultServer != nil || defaultServerErr != nil {
- return defaultServerErr
- }
- defaultServer, defaultServerErr = NewServer(
- "default",
- "--port", strconv.Itoa(*serverBasePort),
- "--save", "",
- "--appendonly", "no")
- return defaultServerErr
-}
-
-// DialDefaultServer starts the test server if not already started and dials a
-// connection to the server.
-func DialDefaultServer() (Conn, error) {
- if err := startDefaultServer(); err != nil {
- return nil, err
- }
- c, err := Dial("tcp", fmt.Sprintf(":%d", *serverBasePort), DialReadTimeout(1*time.Second), DialWriteTimeout(1*time.Second))
- if err != nil {
- return nil, err
- }
- c.Do("FLUSHDB")
- return c, nil
-}
-
-func TestMain(m *testing.M) {
- os.Exit(func() int {
- flag.Parse()
-
- var f *os.File
- if *serverLogName != "" {
- var err error
- f, err = os.OpenFile(*serverLogName, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0600)
- if err != nil {
- fmt.Fprintf(os.Stderr, "Error opening redis-log: %v\n", err)
- return 1
- }
- defer f.Close()
- serverLog = f
- }
-
- defer stopDefaultServer()
-
- return m.Run()
- }())
-}
diff --git a/vendor/src/github.com/garyburd/redigo/redis/zpop_example_test.go b/vendor/src/github.com/garyburd/redigo/redis/zpop_example_test.go
deleted file mode 100644
index 1d86ee6..0000000
--- a/vendor/src/github.com/garyburd/redigo/redis/zpop_example_test.go
+++ /dev/null
@@ -1,113 +0,0 @@
-// Copyright 2013 Gary Burd
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package redis_test
-
-import (
- "fmt"
- "github.com/garyburd/redigo/redis"
-)
-
-// zpop pops a value from the ZSET key using WATCH/MULTI/EXEC commands.
-func zpop(c redis.Conn, key string) (result string, err error) {
-
- defer func() {
- // Return connection to normal state on error.
- if err != nil {
- c.Do("DISCARD")
- }
- }()
-
- // Loop until transaction is successful.
- for {
- if _, err := c.Do("WATCH", key); err != nil {
- return "", err
- }
-
- members, err := redis.Strings(c.Do("ZRANGE", key, 0, 0))
- if err != nil {
- return "", err
- }
- if len(members) != 1 {
- return "", redis.ErrNil
- }
-
- c.Send("MULTI")
- c.Send("ZREM", key, members[0])
- queued, err := c.Do("EXEC")
- if err != nil {
- return "", err
- }
-
- if queued != nil {
- result = members[0]
- break
- }
- }
-
- return result, nil
-}
-
-// zpopScript pops a value from a ZSET.
-var zpopScript = redis.NewScript(1, `
- local r = redis.call('ZRANGE', KEYS[1], 0, 0)
- if r ~= nil then
- r = r[1]
- redis.call('ZREM', KEYS[1], r)
- end
- return r
-`)
-
-// This example implements ZPOP as described at
-// http://redis.io/topics/transactions using WATCH/MULTI/EXEC and scripting.
-func Example_zpop() {
- c, err := dial()
- if err != nil {
- fmt.Println(err)
- return
- }
- defer c.Close()
-
- // Add test data using a pipeline.
-
- for i, member := range []string{"red", "blue", "green"} {
- c.Send("ZADD", "zset", i, member)
- }
- if _, err := c.Do(""); err != nil {
- fmt.Println(err)
- return
- }
-
- // Pop using WATCH/MULTI/EXEC
-
- v, err := zpop(c, "zset")
- if err != nil {
- fmt.Println(err)
- return
- }
- fmt.Println(v)
-
- // Pop using a script.
-
- v, err = redis.String(zpopScript.Do(c, "zset"))
- if err != nil {
- fmt.Println(err)
- return
- }
- fmt.Println(v)
-
- // Output:
- // red
- // blue
-}
diff --git a/vendor/src/github.com/golang/protobuf/proto/Makefile b/vendor/src/github.com/golang/protobuf/proto/Makefile
deleted file mode 100644
index e2e0651..0000000
--- a/vendor/src/github.com/golang/protobuf/proto/Makefile
+++ /dev/null
@@ -1,43 +0,0 @@
-# Go support for Protocol Buffers - Google's data interchange format
-#
-# Copyright 2010 The Go Authors. All rights reserved.
-# https://github.com/golang/protobuf
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-install:
- go install
-
-test: install generate-test-pbs
- go test
-
-
-generate-test-pbs:
- make install
- make -C testdata
- protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata,Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. proto3_proto/proto3.proto
- make
diff --git a/vendor/src/github.com/golang/protobuf/proto/all_test.go b/vendor/src/github.com/golang/protobuf/proto/all_test.go
deleted file mode 100644
index 41451a4..0000000
--- a/vendor/src/github.com/golang/protobuf/proto/all_test.go
+++ /dev/null
@@ -1,2278 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto_test
-
-import (
- "bytes"
- "encoding/json"
- "errors"
- "fmt"
- "math"
- "math/rand"
- "reflect"
- "runtime/debug"
- "strings"
- "testing"
- "time"
-
- . "github.com/golang/protobuf/proto"
- . "github.com/golang/protobuf/proto/testdata"
-)
-
-var globalO *Buffer
-
-func old() *Buffer {
- if globalO == nil {
- globalO = NewBuffer(nil)
- }
- globalO.Reset()
- return globalO
-}
-
-func equalbytes(b1, b2 []byte, t *testing.T) {
- if len(b1) != len(b2) {
- t.Errorf("wrong lengths: 2*%d != %d", len(b1), len(b2))
- return
- }
- for i := 0; i < len(b1); i++ {
- if b1[i] != b2[i] {
- t.Errorf("bad byte[%d]:%x %x: %s %s", i, b1[i], b2[i], b1, b2)
- }
- }
-}
-
-func initGoTestField() *GoTestField {
- f := new(GoTestField)
- f.Label = String("label")
- f.Type = String("type")
- return f
-}
-
-// These are all structurally equivalent but the tag numbers differ.
-// (It's remarkable that required, optional, and repeated all have
-// 8 letters.)
-func initGoTest_RequiredGroup() *GoTest_RequiredGroup {
- return &GoTest_RequiredGroup{
- RequiredField: String("required"),
- }
-}
-
-func initGoTest_OptionalGroup() *GoTest_OptionalGroup {
- return &GoTest_OptionalGroup{
- RequiredField: String("optional"),
- }
-}
-
-func initGoTest_RepeatedGroup() *GoTest_RepeatedGroup {
- return &GoTest_RepeatedGroup{
- RequiredField: String("repeated"),
- }
-}
-
-func initGoTest(setdefaults bool) *GoTest {
- pb := new(GoTest)
- if setdefaults {
- pb.F_BoolDefaulted = Bool(Default_GoTest_F_BoolDefaulted)
- pb.F_Int32Defaulted = Int32(Default_GoTest_F_Int32Defaulted)
- pb.F_Int64Defaulted = Int64(Default_GoTest_F_Int64Defaulted)
- pb.F_Fixed32Defaulted = Uint32(Default_GoTest_F_Fixed32Defaulted)
- pb.F_Fixed64Defaulted = Uint64(Default_GoTest_F_Fixed64Defaulted)
- pb.F_Uint32Defaulted = Uint32(Default_GoTest_F_Uint32Defaulted)
- pb.F_Uint64Defaulted = Uint64(Default_GoTest_F_Uint64Defaulted)
- pb.F_FloatDefaulted = Float32(Default_GoTest_F_FloatDefaulted)
- pb.F_DoubleDefaulted = Float64(Default_GoTest_F_DoubleDefaulted)
- pb.F_StringDefaulted = String(Default_GoTest_F_StringDefaulted)
- pb.F_BytesDefaulted = Default_GoTest_F_BytesDefaulted
- pb.F_Sint32Defaulted = Int32(Default_GoTest_F_Sint32Defaulted)
- pb.F_Sint64Defaulted = Int64(Default_GoTest_F_Sint64Defaulted)
- }
-
- pb.Kind = GoTest_TIME.Enum()
- pb.RequiredField = initGoTestField()
- pb.F_BoolRequired = Bool(true)
- pb.F_Int32Required = Int32(3)
- pb.F_Int64Required = Int64(6)
- pb.F_Fixed32Required = Uint32(32)
- pb.F_Fixed64Required = Uint64(64)
- pb.F_Uint32Required = Uint32(3232)
- pb.F_Uint64Required = Uint64(6464)
- pb.F_FloatRequired = Float32(3232)
- pb.F_DoubleRequired = Float64(6464)
- pb.F_StringRequired = String("string")
- pb.F_BytesRequired = []byte("bytes")
- pb.F_Sint32Required = Int32(-32)
- pb.F_Sint64Required = Int64(-64)
- pb.Requiredgroup = initGoTest_RequiredGroup()
-
- return pb
-}
-
-func fail(msg string, b *bytes.Buffer, s string, t *testing.T) {
- data := b.Bytes()
- ld := len(data)
- ls := len(s) / 2
-
- fmt.Printf("fail %s ld=%d ls=%d\n", msg, ld, ls)
-
- // find the interesting spot - n
- n := ls
- if ld < ls {
- n = ld
- }
- j := 0
- for i := 0; i < n; i++ {
- bs := hex(s[j])*16 + hex(s[j+1])
- j += 2
- if data[i] == bs {
- continue
- }
- n = i
- break
- }
- l := n - 10
- if l < 0 {
- l = 0
- }
- h := n + 10
-
- // find the interesting spot - n
- fmt.Printf("is[%d]:", l)
- for i := l; i < h; i++ {
- if i >= ld {
- fmt.Printf(" --")
- continue
- }
- fmt.Printf(" %.2x", data[i])
- }
- fmt.Printf("\n")
-
- fmt.Printf("sb[%d]:", l)
- for i := l; i < h; i++ {
- if i >= ls {
- fmt.Printf(" --")
- continue
- }
- bs := hex(s[j])*16 + hex(s[j+1])
- j += 2
- fmt.Printf(" %.2x", bs)
- }
- fmt.Printf("\n")
-
- t.Fail()
-
- // t.Errorf("%s: \ngood: %s\nbad: %x", msg, s, b.Bytes())
- // Print the output in a partially-decoded format; can
- // be helpful when updating the test. It produces the output
- // that is pasted, with minor edits, into the argument to verify().
- // data := b.Bytes()
- // nesting := 0
- // for b.Len() > 0 {
- // start := len(data) - b.Len()
- // var u uint64
- // u, err := DecodeVarint(b)
- // if err != nil {
- // fmt.Printf("decode error on varint:", err)
- // return
- // }
- // wire := u & 0x7
- // tag := u >> 3
- // switch wire {
- // case WireVarint:
- // v, err := DecodeVarint(b)
- // if err != nil {
- // fmt.Printf("decode error on varint:", err)
- // return
- // }
- // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n",
- // data[start:len(data)-b.Len()], tag, wire, v)
- // case WireFixed32:
- // v, err := DecodeFixed32(b)
- // if err != nil {
- // fmt.Printf("decode error on fixed32:", err)
- // return
- // }
- // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n",
- // data[start:len(data)-b.Len()], tag, wire, v)
- // case WireFixed64:
- // v, err := DecodeFixed64(b)
- // if err != nil {
- // fmt.Printf("decode error on fixed64:", err)
- // return
- // }
- // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n",
- // data[start:len(data)-b.Len()], tag, wire, v)
- // case WireBytes:
- // nb, err := DecodeVarint(b)
- // if err != nil {
- // fmt.Printf("decode error on bytes:", err)
- // return
- // }
- // after_tag := len(data) - b.Len()
- // str := make([]byte, nb)
- // _, err = b.Read(str)
- // if err != nil {
- // fmt.Printf("decode error on bytes:", err)
- // return
- // }
- // fmt.Printf("\t\t\"%x\" \"%x\" // field %d, encoding %d (FIELD)\n",
- // data[start:after_tag], str, tag, wire)
- // case WireStartGroup:
- // nesting++
- // fmt.Printf("\t\t\"%x\"\t\t// start group field %d level %d\n",
- // data[start:len(data)-b.Len()], tag, nesting)
- // case WireEndGroup:
- // fmt.Printf("\t\t\"%x\"\t\t// end group field %d level %d\n",
- // data[start:len(data)-b.Len()], tag, nesting)
- // nesting--
- // default:
- // fmt.Printf("unrecognized wire type %d\n", wire)
- // return
- // }
- // }
-}
-
-func hex(c uint8) uint8 {
- if '0' <= c && c <= '9' {
- return c - '0'
- }
- if 'a' <= c && c <= 'f' {
- return 10 + c - 'a'
- }
- if 'A' <= c && c <= 'F' {
- return 10 + c - 'A'
- }
- return 0
-}
-
-func equal(b []byte, s string, t *testing.T) bool {
- if 2*len(b) != len(s) {
- // fail(fmt.Sprintf("wrong lengths: 2*%d != %d", len(b), len(s)), b, s, t)
- fmt.Printf("wrong lengths: 2*%d != %d\n", len(b), len(s))
- return false
- }
- for i, j := 0, 0; i < len(b); i, j = i+1, j+2 {
- x := hex(s[j])*16 + hex(s[j+1])
- if b[i] != x {
- // fail(fmt.Sprintf("bad byte[%d]:%x %x", i, b[i], x), b, s, t)
- fmt.Printf("bad byte[%d]:%x %x", i, b[i], x)
- return false
- }
- }
- return true
-}
-
-func overify(t *testing.T, pb *GoTest, expected string) {
- o := old()
- err := o.Marshal(pb)
- if err != nil {
- fmt.Printf("overify marshal-1 err = %v", err)
- o.DebugPrint("", o.Bytes())
- t.Fatalf("expected = %s", expected)
- }
- if !equal(o.Bytes(), expected, t) {
- o.DebugPrint("overify neq 1", o.Bytes())
- t.Fatalf("expected = %s", expected)
- }
-
- // Now test Unmarshal by recreating the original buffer.
- pbd := new(GoTest)
- err = o.Unmarshal(pbd)
- if err != nil {
- t.Fatalf("overify unmarshal err = %v", err)
- o.DebugPrint("", o.Bytes())
- t.Fatalf("string = %s", expected)
- }
- o.Reset()
- err = o.Marshal(pbd)
- if err != nil {
- t.Errorf("overify marshal-2 err = %v", err)
- o.DebugPrint("", o.Bytes())
- t.Fatalf("string = %s", expected)
- }
- if !equal(o.Bytes(), expected, t) {
- o.DebugPrint("overify neq 2", o.Bytes())
- t.Fatalf("string = %s", expected)
- }
-}
-
-// Simple tests for numeric encode/decode primitives (varint, etc.)
-func TestNumericPrimitives(t *testing.T) {
- for i := uint64(0); i < 1e6; i += 111 {
- o := old()
- if o.EncodeVarint(i) != nil {
- t.Error("EncodeVarint")
- break
- }
- x, e := o.DecodeVarint()
- if e != nil {
- t.Fatal("DecodeVarint")
- }
- if x != i {
- t.Fatal("varint decode fail:", i, x)
- }
-
- o = old()
- if o.EncodeFixed32(i) != nil {
- t.Fatal("encFixed32")
- }
- x, e = o.DecodeFixed32()
- if e != nil {
- t.Fatal("decFixed32")
- }
- if x != i {
- t.Fatal("fixed32 decode fail:", i, x)
- }
-
- o = old()
- if o.EncodeFixed64(i*1234567) != nil {
- t.Error("encFixed64")
- break
- }
- x, e = o.DecodeFixed64()
- if e != nil {
- t.Error("decFixed64")
- break
- }
- if x != i*1234567 {
- t.Error("fixed64 decode fail:", i*1234567, x)
- break
- }
-
- o = old()
- i32 := int32(i - 12345)
- if o.EncodeZigzag32(uint64(i32)) != nil {
- t.Fatal("EncodeZigzag32")
- }
- x, e = o.DecodeZigzag32()
- if e != nil {
- t.Fatal("DecodeZigzag32")
- }
- if x != uint64(uint32(i32)) {
- t.Fatal("zigzag32 decode fail:", i32, x)
- }
-
- o = old()
- i64 := int64(i - 12345)
- if o.EncodeZigzag64(uint64(i64)) != nil {
- t.Fatal("EncodeZigzag64")
- }
- x, e = o.DecodeZigzag64()
- if e != nil {
- t.Fatal("DecodeZigzag64")
- }
- if x != uint64(i64) {
- t.Fatal("zigzag64 decode fail:", i64, x)
- }
- }
-}
-
-// fakeMarshaler is a simple struct implementing Marshaler and Message interfaces.
-type fakeMarshaler struct {
- b []byte
- err error
-}
-
-func (f *fakeMarshaler) Marshal() ([]byte, error) { return f.b, f.err }
-func (f *fakeMarshaler) String() string { return fmt.Sprintf("Bytes: %v Error: %v", f.b, f.err) }
-func (f *fakeMarshaler) ProtoMessage() {}
-func (f *fakeMarshaler) Reset() {}
-
-type msgWithFakeMarshaler struct {
- M *fakeMarshaler `protobuf:"bytes,1,opt,name=fake"`
-}
-
-func (m *msgWithFakeMarshaler) String() string { return CompactTextString(m) }
-func (m *msgWithFakeMarshaler) ProtoMessage() {}
-func (m *msgWithFakeMarshaler) Reset() {}
-
-// Simple tests for proto messages that implement the Marshaler interface.
-func TestMarshalerEncoding(t *testing.T) {
- tests := []struct {
- name string
- m Message
- want []byte
- errType reflect.Type
- }{
- {
- name: "Marshaler that fails",
- m: &fakeMarshaler{
- err: errors.New("some marshal err"),
- b: []byte{5, 6, 7},
- },
- // Since the Marshal method returned bytes, they should be written to the
- // buffer. (For efficiency, we assume that Marshal implementations are
- // always correct w.r.t. RequiredNotSetError and output.)
- want: []byte{5, 6, 7},
- errType: reflect.TypeOf(errors.New("some marshal err")),
- },
- {
- name: "Marshaler that fails with RequiredNotSetError",
- m: &msgWithFakeMarshaler{
- M: &fakeMarshaler{
- err: &RequiredNotSetError{},
- b: []byte{5, 6, 7},
- },
- },
- // Since there's an error that can be continued after,
- // the buffer should be written.
- want: []byte{
- 10, 3, // for &msgWithFakeMarshaler
- 5, 6, 7, // for &fakeMarshaler
- },
- errType: reflect.TypeOf(&RequiredNotSetError{}),
- },
- {
- name: "Marshaler that succeeds",
- m: &fakeMarshaler{
- b: []byte{0, 1, 2, 3, 4, 127, 255},
- },
- want: []byte{0, 1, 2, 3, 4, 127, 255},
- },
- }
- for _, test := range tests {
- b := NewBuffer(nil)
- err := b.Marshal(test.m)
- if reflect.TypeOf(err) != test.errType {
- t.Errorf("%s: got err %T(%v) wanted %T", test.name, err, err, test.errType)
- }
- if !reflect.DeepEqual(test.want, b.Bytes()) {
- t.Errorf("%s: got bytes %v wanted %v", test.name, b.Bytes(), test.want)
- }
- if size := Size(test.m); size != len(b.Bytes()) {
- t.Errorf("%s: Size(_) = %v, but marshaled to %v bytes", test.name, size, len(b.Bytes()))
- }
-
- m, mErr := Marshal(test.m)
- if !bytes.Equal(b.Bytes(), m) {
- t.Errorf("%s: Marshal returned %v, but (*Buffer).Marshal wrote %v", test.name, m, b.Bytes())
- }
- if !reflect.DeepEqual(err, mErr) {
- t.Errorf("%s: Marshal err = %q, but (*Buffer).Marshal returned %q",
- test.name, fmt.Sprint(mErr), fmt.Sprint(err))
- }
- }
-}
-
-// Simple tests for bytes
-func TestBytesPrimitives(t *testing.T) {
- o := old()
- bytes := []byte{'n', 'o', 'w', ' ', 'i', 's', ' ', 't', 'h', 'e', ' ', 't', 'i', 'm', 'e'}
- if o.EncodeRawBytes(bytes) != nil {
- t.Error("EncodeRawBytes")
- }
- decb, e := o.DecodeRawBytes(false)
- if e != nil {
- t.Error("DecodeRawBytes")
- }
- equalbytes(bytes, decb, t)
-}
-
-// Simple tests for strings
-func TestStringPrimitives(t *testing.T) {
- o := old()
- s := "now is the time"
- if o.EncodeStringBytes(s) != nil {
- t.Error("enc_string")
- }
- decs, e := o.DecodeStringBytes()
- if e != nil {
- t.Error("dec_string")
- }
- if s != decs {
- t.Error("string encode/decode fail:", s, decs)
- }
-}
-
-// Do we catch the "required bit not set" case?
-func TestRequiredBit(t *testing.T) {
- o := old()
- pb := new(GoTest)
- err := o.Marshal(pb)
- if err == nil {
- t.Error("did not catch missing required fields")
- } else if strings.Index(err.Error(), "Kind") < 0 {
- t.Error("wrong error type:", err)
- }
-}
-
-// Check that all fields are nil.
-// Clearly silly, and a residue from a more interesting test with an earlier,
-// different initialization property, but it once caught a compiler bug so
-// it lives.
-func checkInitialized(pb *GoTest, t *testing.T) {
- if pb.F_BoolDefaulted != nil {
- t.Error("New or Reset did not set boolean:", *pb.F_BoolDefaulted)
- }
- if pb.F_Int32Defaulted != nil {
- t.Error("New or Reset did not set int32:", *pb.F_Int32Defaulted)
- }
- if pb.F_Int64Defaulted != nil {
- t.Error("New or Reset did not set int64:", *pb.F_Int64Defaulted)
- }
- if pb.F_Fixed32Defaulted != nil {
- t.Error("New or Reset did not set fixed32:", *pb.F_Fixed32Defaulted)
- }
- if pb.F_Fixed64Defaulted != nil {
- t.Error("New or Reset did not set fixed64:", *pb.F_Fixed64Defaulted)
- }
- if pb.F_Uint32Defaulted != nil {
- t.Error("New or Reset did not set uint32:", *pb.F_Uint32Defaulted)
- }
- if pb.F_Uint64Defaulted != nil {
- t.Error("New or Reset did not set uint64:", *pb.F_Uint64Defaulted)
- }
- if pb.F_FloatDefaulted != nil {
- t.Error("New or Reset did not set float:", *pb.F_FloatDefaulted)
- }
- if pb.F_DoubleDefaulted != nil {
- t.Error("New or Reset did not set double:", *pb.F_DoubleDefaulted)
- }
- if pb.F_StringDefaulted != nil {
- t.Error("New or Reset did not set string:", *pb.F_StringDefaulted)
- }
- if pb.F_BytesDefaulted != nil {
- t.Error("New or Reset did not set bytes:", string(pb.F_BytesDefaulted))
- }
- if pb.F_Sint32Defaulted != nil {
- t.Error("New or Reset did not set int32:", *pb.F_Sint32Defaulted)
- }
- if pb.F_Sint64Defaulted != nil {
- t.Error("New or Reset did not set int64:", *pb.F_Sint64Defaulted)
- }
-}
-
-// Does Reset() reset?
-func TestReset(t *testing.T) {
- pb := initGoTest(true)
- // muck with some values
- pb.F_BoolDefaulted = Bool(false)
- pb.F_Int32Defaulted = Int32(237)
- pb.F_Int64Defaulted = Int64(12346)
- pb.F_Fixed32Defaulted = Uint32(32000)
- pb.F_Fixed64Defaulted = Uint64(666)
- pb.F_Uint32Defaulted = Uint32(323232)
- pb.F_Uint64Defaulted = nil
- pb.F_FloatDefaulted = nil
- pb.F_DoubleDefaulted = Float64(0)
- pb.F_StringDefaulted = String("gotcha")
- pb.F_BytesDefaulted = []byte("asdfasdf")
- pb.F_Sint32Defaulted = Int32(123)
- pb.F_Sint64Defaulted = Int64(789)
- pb.Reset()
- checkInitialized(pb, t)
-}
-
-// All required fields set, no defaults provided.
-func TestEncodeDecode1(t *testing.T) {
- pb := initGoTest(false)
- overify(t, pb,
- "0807"+ // field 1, encoding 0, value 7
- "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField)
- "5001"+ // field 10, encoding 0, value 1
- "5803"+ // field 11, encoding 0, value 3
- "6006"+ // field 12, encoding 0, value 6
- "6d20000000"+ // field 13, encoding 5, value 0x20
- "714000000000000000"+ // field 14, encoding 1, value 0x40
- "78a019"+ // field 15, encoding 0, value 0xca0 = 3232
- "8001c032"+ // field 16, encoding 0, value 0x1940 = 6464
- "8d0100004a45"+ // field 17, encoding 5, value 3232.0
- "9101000000000040b940"+ // field 18, encoding 1, value 6464.0
- "9a0106"+"737472696e67"+ // field 19, encoding 2, string "string"
- "b304"+ // field 70, encoding 3, start group
- "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required"
- "b404"+ // field 70, encoding 4, end group
- "aa0605"+"6279746573"+ // field 101, encoding 2, string "bytes"
- "b0063f"+ // field 102, encoding 0, 0x3f zigzag32
- "b8067f") // field 103, encoding 0, 0x7f zigzag64
-}
-
-// All required fields set, defaults provided.
-func TestEncodeDecode2(t *testing.T) {
- pb := initGoTest(true)
- overify(t, pb,
- "0807"+ // field 1, encoding 0, value 7
- "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField)
- "5001"+ // field 10, encoding 0, value 1
- "5803"+ // field 11, encoding 0, value 3
- "6006"+ // field 12, encoding 0, value 6
- "6d20000000"+ // field 13, encoding 5, value 32
- "714000000000000000"+ // field 14, encoding 1, value 64
- "78a019"+ // field 15, encoding 0, value 3232
- "8001c032"+ // field 16, encoding 0, value 6464
- "8d0100004a45"+ // field 17, encoding 5, value 3232.0
- "9101000000000040b940"+ // field 18, encoding 1, value 6464.0
- "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string"
- "c00201"+ // field 40, encoding 0, value 1
- "c80220"+ // field 41, encoding 0, value 32
- "d00240"+ // field 42, encoding 0, value 64
- "dd0240010000"+ // field 43, encoding 5, value 320
- "e1028002000000000000"+ // field 44, encoding 1, value 640
- "e8028019"+ // field 45, encoding 0, value 3200
- "f0028032"+ // field 46, encoding 0, value 6400
- "fd02e0659948"+ // field 47, encoding 5, value 314159.0
- "81030000000050971041"+ // field 48, encoding 1, value 271828.0
- "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n"
- "b304"+ // start group field 70 level 1
- "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required"
- "b404"+ // end group field 70 level 1
- "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes"
- "b0063f"+ // field 102, encoding 0, 0x3f zigzag32
- "b8067f"+ // field 103, encoding 0, 0x7f zigzag64
- "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose"
- "90193f"+ // field 402, encoding 0, value 63
- "98197f") // field 403, encoding 0, value 127
-
-}
-
-// All default fields set to their default value by hand
-func TestEncodeDecode3(t *testing.T) {
- pb := initGoTest(false)
- pb.F_BoolDefaulted = Bool(true)
- pb.F_Int32Defaulted = Int32(32)
- pb.F_Int64Defaulted = Int64(64)
- pb.F_Fixed32Defaulted = Uint32(320)
- pb.F_Fixed64Defaulted = Uint64(640)
- pb.F_Uint32Defaulted = Uint32(3200)
- pb.F_Uint64Defaulted = Uint64(6400)
- pb.F_FloatDefaulted = Float32(314159)
- pb.F_DoubleDefaulted = Float64(271828)
- pb.F_StringDefaulted = String("hello, \"world!\"\n")
- pb.F_BytesDefaulted = []byte("Bignose")
- pb.F_Sint32Defaulted = Int32(-32)
- pb.F_Sint64Defaulted = Int64(-64)
-
- overify(t, pb,
- "0807"+ // field 1, encoding 0, value 7
- "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField)
- "5001"+ // field 10, encoding 0, value 1
- "5803"+ // field 11, encoding 0, value 3
- "6006"+ // field 12, encoding 0, value 6
- "6d20000000"+ // field 13, encoding 5, value 32
- "714000000000000000"+ // field 14, encoding 1, value 64
- "78a019"+ // field 15, encoding 0, value 3232
- "8001c032"+ // field 16, encoding 0, value 6464
- "8d0100004a45"+ // field 17, encoding 5, value 3232.0
- "9101000000000040b940"+ // field 18, encoding 1, value 6464.0
- "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string"
- "c00201"+ // field 40, encoding 0, value 1
- "c80220"+ // field 41, encoding 0, value 32
- "d00240"+ // field 42, encoding 0, value 64
- "dd0240010000"+ // field 43, encoding 5, value 320
- "e1028002000000000000"+ // field 44, encoding 1, value 640
- "e8028019"+ // field 45, encoding 0, value 3200
- "f0028032"+ // field 46, encoding 0, value 6400
- "fd02e0659948"+ // field 47, encoding 5, value 314159.0
- "81030000000050971041"+ // field 48, encoding 1, value 271828.0
- "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n"
- "b304"+ // start group field 70 level 1
- "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required"
- "b404"+ // end group field 70 level 1
- "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes"
- "b0063f"+ // field 102, encoding 0, 0x3f zigzag32
- "b8067f"+ // field 103, encoding 0, 0x7f zigzag64
- "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose"
- "90193f"+ // field 402, encoding 0, value 63
- "98197f") // field 403, encoding 0, value 127
-
-}
-
-// All required fields set, defaults provided, all non-defaulted optional fields have values.
-func TestEncodeDecode4(t *testing.T) {
- pb := initGoTest(true)
- pb.Table = String("hello")
- pb.Param = Int32(7)
- pb.OptionalField = initGoTestField()
- pb.F_BoolOptional = Bool(true)
- pb.F_Int32Optional = Int32(32)
- pb.F_Int64Optional = Int64(64)
- pb.F_Fixed32Optional = Uint32(3232)
- pb.F_Fixed64Optional = Uint64(6464)
- pb.F_Uint32Optional = Uint32(323232)
- pb.F_Uint64Optional = Uint64(646464)
- pb.F_FloatOptional = Float32(32.)
- pb.F_DoubleOptional = Float64(64.)
- pb.F_StringOptional = String("hello")
- pb.F_BytesOptional = []byte("Bignose")
- pb.F_Sint32Optional = Int32(-32)
- pb.F_Sint64Optional = Int64(-64)
- pb.Optionalgroup = initGoTest_OptionalGroup()
-
- overify(t, pb,
- "0807"+ // field 1, encoding 0, value 7
- "1205"+"68656c6c6f"+ // field 2, encoding 2, string "hello"
- "1807"+ // field 3, encoding 0, value 7
- "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField)
- "320d"+"0a056c6162656c120474797065"+ // field 6, encoding 2 (GoTestField)
- "5001"+ // field 10, encoding 0, value 1
- "5803"+ // field 11, encoding 0, value 3
- "6006"+ // field 12, encoding 0, value 6
- "6d20000000"+ // field 13, encoding 5, value 32
- "714000000000000000"+ // field 14, encoding 1, value 64
- "78a019"+ // field 15, encoding 0, value 3232
- "8001c032"+ // field 16, encoding 0, value 6464
- "8d0100004a45"+ // field 17, encoding 5, value 3232.0
- "9101000000000040b940"+ // field 18, encoding 1, value 6464.0
- "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string"
- "f00101"+ // field 30, encoding 0, value 1
- "f80120"+ // field 31, encoding 0, value 32
- "800240"+ // field 32, encoding 0, value 64
- "8d02a00c0000"+ // field 33, encoding 5, value 3232
- "91024019000000000000"+ // field 34, encoding 1, value 6464
- "9802a0dd13"+ // field 35, encoding 0, value 323232
- "a002c0ba27"+ // field 36, encoding 0, value 646464
- "ad0200000042"+ // field 37, encoding 5, value 32.0
- "b1020000000000005040"+ // field 38, encoding 1, value 64.0
- "ba0205"+"68656c6c6f"+ // field 39, encoding 2, string "hello"
- "c00201"+ // field 40, encoding 0, value 1
- "c80220"+ // field 41, encoding 0, value 32
- "d00240"+ // field 42, encoding 0, value 64
- "dd0240010000"+ // field 43, encoding 5, value 320
- "e1028002000000000000"+ // field 44, encoding 1, value 640
- "e8028019"+ // field 45, encoding 0, value 3200
- "f0028032"+ // field 46, encoding 0, value 6400
- "fd02e0659948"+ // field 47, encoding 5, value 314159.0
- "81030000000050971041"+ // field 48, encoding 1, value 271828.0
- "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n"
- "b304"+ // start group field 70 level 1
- "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required"
- "b404"+ // end group field 70 level 1
- "d305"+ // start group field 90 level 1
- "da0508"+"6f7074696f6e616c"+ // field 91, encoding 2, string "optional"
- "d405"+ // end group field 90 level 1
- "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes"
- "b0063f"+ // field 102, encoding 0, 0x3f zigzag32
- "b8067f"+ // field 103, encoding 0, 0x7f zigzag64
- "ea1207"+"4269676e6f7365"+ // field 301, encoding 2, string "Bignose"
- "f0123f"+ // field 302, encoding 0, value 63
- "f8127f"+ // field 303, encoding 0, value 127
- "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose"
- "90193f"+ // field 402, encoding 0, value 63
- "98197f") // field 403, encoding 0, value 127
-
-}
-
-// All required fields set, defaults provided, all repeated fields given two values.
-func TestEncodeDecode5(t *testing.T) {
- pb := initGoTest(true)
- pb.RepeatedField = []*GoTestField{initGoTestField(), initGoTestField()}
- pb.F_BoolRepeated = []bool{false, true}
- pb.F_Int32Repeated = []int32{32, 33}
- pb.F_Int64Repeated = []int64{64, 65}
- pb.F_Fixed32Repeated = []uint32{3232, 3333}
- pb.F_Fixed64Repeated = []uint64{6464, 6565}
- pb.F_Uint32Repeated = []uint32{323232, 333333}
- pb.F_Uint64Repeated = []uint64{646464, 656565}
- pb.F_FloatRepeated = []float32{32., 33.}
- pb.F_DoubleRepeated = []float64{64., 65.}
- pb.F_StringRepeated = []string{"hello", "sailor"}
- pb.F_BytesRepeated = [][]byte{[]byte("big"), []byte("nose")}
- pb.F_Sint32Repeated = []int32{32, -32}
- pb.F_Sint64Repeated = []int64{64, -64}
- pb.Repeatedgroup = []*GoTest_RepeatedGroup{initGoTest_RepeatedGroup(), initGoTest_RepeatedGroup()}
-
- overify(t, pb,
- "0807"+ // field 1, encoding 0, value 7
- "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField)
- "2a0d"+"0a056c6162656c120474797065"+ // field 5, encoding 2 (GoTestField)
- "2a0d"+"0a056c6162656c120474797065"+ // field 5, encoding 2 (GoTestField)
- "5001"+ // field 10, encoding 0, value 1
- "5803"+ // field 11, encoding 0, value 3
- "6006"+ // field 12, encoding 0, value 6
- "6d20000000"+ // field 13, encoding 5, value 32
- "714000000000000000"+ // field 14, encoding 1, value 64
- "78a019"+ // field 15, encoding 0, value 3232
- "8001c032"+ // field 16, encoding 0, value 6464
- "8d0100004a45"+ // field 17, encoding 5, value 3232.0
- "9101000000000040b940"+ // field 18, encoding 1, value 6464.0
- "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string"
- "a00100"+ // field 20, encoding 0, value 0
- "a00101"+ // field 20, encoding 0, value 1
- "a80120"+ // field 21, encoding 0, value 32
- "a80121"+ // field 21, encoding 0, value 33
- "b00140"+ // field 22, encoding 0, value 64
- "b00141"+ // field 22, encoding 0, value 65
- "bd01a00c0000"+ // field 23, encoding 5, value 3232
- "bd01050d0000"+ // field 23, encoding 5, value 3333
- "c1014019000000000000"+ // field 24, encoding 1, value 6464
- "c101a519000000000000"+ // field 24, encoding 1, value 6565
- "c801a0dd13"+ // field 25, encoding 0, value 323232
- "c80195ac14"+ // field 25, encoding 0, value 333333
- "d001c0ba27"+ // field 26, encoding 0, value 646464
- "d001b58928"+ // field 26, encoding 0, value 656565
- "dd0100000042"+ // field 27, encoding 5, value 32.0
- "dd0100000442"+ // field 27, encoding 5, value 33.0
- "e1010000000000005040"+ // field 28, encoding 1, value 64.0
- "e1010000000000405040"+ // field 28, encoding 1, value 65.0
- "ea0105"+"68656c6c6f"+ // field 29, encoding 2, string "hello"
- "ea0106"+"7361696c6f72"+ // field 29, encoding 2, string "sailor"
- "c00201"+ // field 40, encoding 0, value 1
- "c80220"+ // field 41, encoding 0, value 32
- "d00240"+ // field 42, encoding 0, value 64
- "dd0240010000"+ // field 43, encoding 5, value 320
- "e1028002000000000000"+ // field 44, encoding 1, value 640
- "e8028019"+ // field 45, encoding 0, value 3200
- "f0028032"+ // field 46, encoding 0, value 6400
- "fd02e0659948"+ // field 47, encoding 5, value 314159.0
- "81030000000050971041"+ // field 48, encoding 1, value 271828.0
- "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n"
- "b304"+ // start group field 70 level 1
- "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required"
- "b404"+ // end group field 70 level 1
- "8305"+ // start group field 80 level 1
- "8a0508"+"7265706561746564"+ // field 81, encoding 2, string "repeated"
- "8405"+ // end group field 80 level 1
- "8305"+ // start group field 80 level 1
- "8a0508"+"7265706561746564"+ // field 81, encoding 2, string "repeated"
- "8405"+ // end group field 80 level 1
- "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes"
- "b0063f"+ // field 102, encoding 0, 0x3f zigzag32
- "b8067f"+ // field 103, encoding 0, 0x7f zigzag64
- "ca0c03"+"626967"+ // field 201, encoding 2, string "big"
- "ca0c04"+"6e6f7365"+ // field 201, encoding 2, string "nose"
- "d00c40"+ // field 202, encoding 0, value 32
- "d00c3f"+ // field 202, encoding 0, value -32
- "d80c8001"+ // field 203, encoding 0, value 64
- "d80c7f"+ // field 203, encoding 0, value -64
- "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose"
- "90193f"+ // field 402, encoding 0, value 63
- "98197f") // field 403, encoding 0, value 127
-
-}
-
-// All required fields set, all packed repeated fields given two values.
-func TestEncodeDecode6(t *testing.T) {
- pb := initGoTest(false)
- pb.F_BoolRepeatedPacked = []bool{false, true}
- pb.F_Int32RepeatedPacked = []int32{32, 33}
- pb.F_Int64RepeatedPacked = []int64{64, 65}
- pb.F_Fixed32RepeatedPacked = []uint32{3232, 3333}
- pb.F_Fixed64RepeatedPacked = []uint64{6464, 6565}
- pb.F_Uint32RepeatedPacked = []uint32{323232, 333333}
- pb.F_Uint64RepeatedPacked = []uint64{646464, 656565}
- pb.F_FloatRepeatedPacked = []float32{32., 33.}
- pb.F_DoubleRepeatedPacked = []float64{64., 65.}
- pb.F_Sint32RepeatedPacked = []int32{32, -32}
- pb.F_Sint64RepeatedPacked = []int64{64, -64}
-
- overify(t, pb,
- "0807"+ // field 1, encoding 0, value 7
- "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField)
- "5001"+ // field 10, encoding 0, value 1
- "5803"+ // field 11, encoding 0, value 3
- "6006"+ // field 12, encoding 0, value 6
- "6d20000000"+ // field 13, encoding 5, value 32
- "714000000000000000"+ // field 14, encoding 1, value 64
- "78a019"+ // field 15, encoding 0, value 3232
- "8001c032"+ // field 16, encoding 0, value 6464
- "8d0100004a45"+ // field 17, encoding 5, value 3232.0
- "9101000000000040b940"+ // field 18, encoding 1, value 6464.0
- "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string"
- "9203020001"+ // field 50, encoding 2, 2 bytes, value 0, value 1
- "9a03022021"+ // field 51, encoding 2, 2 bytes, value 32, value 33
- "a203024041"+ // field 52, encoding 2, 2 bytes, value 64, value 65
- "aa0308"+ // field 53, encoding 2, 8 bytes
- "a00c0000050d0000"+ // value 3232, value 3333
- "b20310"+ // field 54, encoding 2, 16 bytes
- "4019000000000000a519000000000000"+ // value 6464, value 6565
- "ba0306"+ // field 55, encoding 2, 6 bytes
- "a0dd1395ac14"+ // value 323232, value 333333
- "c20306"+ // field 56, encoding 2, 6 bytes
- "c0ba27b58928"+ // value 646464, value 656565
- "ca0308"+ // field 57, encoding 2, 8 bytes
- "0000004200000442"+ // value 32.0, value 33.0
- "d20310"+ // field 58, encoding 2, 16 bytes
- "00000000000050400000000000405040"+ // value 64.0, value 65.0
- "b304"+ // start group field 70 level 1
- "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required"
- "b404"+ // end group field 70 level 1
- "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes"
- "b0063f"+ // field 102, encoding 0, 0x3f zigzag32
- "b8067f"+ // field 103, encoding 0, 0x7f zigzag64
- "b21f02"+ // field 502, encoding 2, 2 bytes
- "403f"+ // value 32, value -32
- "ba1f03"+ // field 503, encoding 2, 3 bytes
- "80017f") // value 64, value -64
-}
-
-// Test that we can encode empty bytes fields.
-func TestEncodeDecodeBytes1(t *testing.T) {
- pb := initGoTest(false)
-
- // Create our bytes
- pb.F_BytesRequired = []byte{}
- pb.F_BytesRepeated = [][]byte{{}}
- pb.F_BytesOptional = []byte{}
-
- d, err := Marshal(pb)
- if err != nil {
- t.Error(err)
- }
-
- pbd := new(GoTest)
- if err := Unmarshal(d, pbd); err != nil {
- t.Error(err)
- }
-
- if pbd.F_BytesRequired == nil || len(pbd.F_BytesRequired) != 0 {
- t.Error("required empty bytes field is incorrect")
- }
- if pbd.F_BytesRepeated == nil || len(pbd.F_BytesRepeated) == 1 && pbd.F_BytesRepeated[0] == nil {
- t.Error("repeated empty bytes field is incorrect")
- }
- if pbd.F_BytesOptional == nil || len(pbd.F_BytesOptional) != 0 {
- t.Error("optional empty bytes field is incorrect")
- }
-}
-
-// Test that we encode nil-valued fields of a repeated bytes field correctly.
-// Since entries in a repeated field cannot be nil, nil must mean empty value.
-func TestEncodeDecodeBytes2(t *testing.T) {
- pb := initGoTest(false)
-
- // Create our bytes
- pb.F_BytesRepeated = [][]byte{nil}
-
- d, err := Marshal(pb)
- if err != nil {
- t.Error(err)
- }
-
- pbd := new(GoTest)
- if err := Unmarshal(d, pbd); err != nil {
- t.Error(err)
- }
-
- if len(pbd.F_BytesRepeated) != 1 || pbd.F_BytesRepeated[0] == nil {
- t.Error("Unexpected value for repeated bytes field")
- }
-}
-
-// All required fields set, defaults provided, all repeated fields given two values.
-func TestSkippingUnrecognizedFields(t *testing.T) {
- o := old()
- pb := initGoTestField()
-
- // Marshal it normally.
- o.Marshal(pb)
-
- // Now new a GoSkipTest record.
- skip := &GoSkipTest{
- SkipInt32: Int32(32),
- SkipFixed32: Uint32(3232),
- SkipFixed64: Uint64(6464),
- SkipString: String("skipper"),
- Skipgroup: &GoSkipTest_SkipGroup{
- GroupInt32: Int32(75),
- GroupString: String("wxyz"),
- },
- }
-
- // Marshal it into same buffer.
- o.Marshal(skip)
-
- pbd := new(GoTestField)
- o.Unmarshal(pbd)
-
- // The __unrecognized field should be a marshaling of GoSkipTest
- skipd := new(GoSkipTest)
-
- o.SetBuf(pbd.XXX_unrecognized)
- o.Unmarshal(skipd)
-
- if *skipd.SkipInt32 != *skip.SkipInt32 {
- t.Error("skip int32", skipd.SkipInt32)
- }
- if *skipd.SkipFixed32 != *skip.SkipFixed32 {
- t.Error("skip fixed32", skipd.SkipFixed32)
- }
- if *skipd.SkipFixed64 != *skip.SkipFixed64 {
- t.Error("skip fixed64", skipd.SkipFixed64)
- }
- if *skipd.SkipString != *skip.SkipString {
- t.Error("skip string", *skipd.SkipString)
- }
- if *skipd.Skipgroup.GroupInt32 != *skip.Skipgroup.GroupInt32 {
- t.Error("skip group int32", skipd.Skipgroup.GroupInt32)
- }
- if *skipd.Skipgroup.GroupString != *skip.Skipgroup.GroupString {
- t.Error("skip group string", *skipd.Skipgroup.GroupString)
- }
-}
-
-// Check that unrecognized fields of a submessage are preserved.
-func TestSubmessageUnrecognizedFields(t *testing.T) {
- nm := &NewMessage{
- Nested: &NewMessage_Nested{
- Name: String("Nigel"),
- FoodGroup: String("carbs"),
- },
- }
- b, err := Marshal(nm)
- if err != nil {
- t.Fatalf("Marshal of NewMessage: %v", err)
- }
-
- // Unmarshal into an OldMessage.
- om := new(OldMessage)
- if err := Unmarshal(b, om); err != nil {
- t.Fatalf("Unmarshal to OldMessage: %v", err)
- }
- exp := &OldMessage{
- Nested: &OldMessage_Nested{
- Name: String("Nigel"),
- // normal protocol buffer users should not do this
- XXX_unrecognized: []byte("\x12\x05carbs"),
- },
- }
- if !Equal(om, exp) {
- t.Errorf("om = %v, want %v", om, exp)
- }
-
- // Clone the OldMessage.
- om = Clone(om).(*OldMessage)
- if !Equal(om, exp) {
- t.Errorf("Clone(om) = %v, want %v", om, exp)
- }
-
- // Marshal the OldMessage, then unmarshal it into an empty NewMessage.
- if b, err = Marshal(om); err != nil {
- t.Fatalf("Marshal of OldMessage: %v", err)
- }
- t.Logf("Marshal(%v) -> %q", om, b)
- nm2 := new(NewMessage)
- if err := Unmarshal(b, nm2); err != nil {
- t.Fatalf("Unmarshal to NewMessage: %v", err)
- }
- if !Equal(nm, nm2) {
- t.Errorf("NewMessage round-trip: %v => %v", nm, nm2)
- }
-}
-
-// Check that an int32 field can be upgraded to an int64 field.
-func TestNegativeInt32(t *testing.T) {
- om := &OldMessage{
- Num: Int32(-1),
- }
- b, err := Marshal(om)
- if err != nil {
- t.Fatalf("Marshal of OldMessage: %v", err)
- }
-
- // Check the size. It should be 11 bytes;
- // 1 for the field/wire type, and 10 for the negative number.
- if len(b) != 11 {
- t.Errorf("%v marshaled as %q, wanted 11 bytes", om, b)
- }
-
- // Unmarshal into a NewMessage.
- nm := new(NewMessage)
- if err := Unmarshal(b, nm); err != nil {
- t.Fatalf("Unmarshal to NewMessage: %v", err)
- }
- want := &NewMessage{
- Num: Int64(-1),
- }
- if !Equal(nm, want) {
- t.Errorf("nm = %v, want %v", nm, want)
- }
-}
-
-// Check that we can grow an array (repeated field) to have many elements.
-// This test doesn't depend only on our encoding; for variety, it makes sure
-// we create, encode, and decode the correct contents explicitly. It's therefore
-// a bit messier.
-// This test also uses (and hence tests) the Marshal/Unmarshal functions
-// instead of the methods.
-func TestBigRepeated(t *testing.T) {
- pb := initGoTest(true)
-
- // Create the arrays
- const N = 50 // Internally the library starts much smaller.
- pb.Repeatedgroup = make([]*GoTest_RepeatedGroup, N)
- pb.F_Sint64Repeated = make([]int64, N)
- pb.F_Sint32Repeated = make([]int32, N)
- pb.F_BytesRepeated = make([][]byte, N)
- pb.F_StringRepeated = make([]string, N)
- pb.F_DoubleRepeated = make([]float64, N)
- pb.F_FloatRepeated = make([]float32, N)
- pb.F_Uint64Repeated = make([]uint64, N)
- pb.F_Uint32Repeated = make([]uint32, N)
- pb.F_Fixed64Repeated = make([]uint64, N)
- pb.F_Fixed32Repeated = make([]uint32, N)
- pb.F_Int64Repeated = make([]int64, N)
- pb.F_Int32Repeated = make([]int32, N)
- pb.F_BoolRepeated = make([]bool, N)
- pb.RepeatedField = make([]*GoTestField, N)
-
- // Fill in the arrays with checkable values.
- igtf := initGoTestField()
- igtrg := initGoTest_RepeatedGroup()
- for i := 0; i < N; i++ {
- pb.Repeatedgroup[i] = igtrg
- pb.F_Sint64Repeated[i] = int64(i)
- pb.F_Sint32Repeated[i] = int32(i)
- s := fmt.Sprint(i)
- pb.F_BytesRepeated[i] = []byte(s)
- pb.F_StringRepeated[i] = s
- pb.F_DoubleRepeated[i] = float64(i)
- pb.F_FloatRepeated[i] = float32(i)
- pb.F_Uint64Repeated[i] = uint64(i)
- pb.F_Uint32Repeated[i] = uint32(i)
- pb.F_Fixed64Repeated[i] = uint64(i)
- pb.F_Fixed32Repeated[i] = uint32(i)
- pb.F_Int64Repeated[i] = int64(i)
- pb.F_Int32Repeated[i] = int32(i)
- pb.F_BoolRepeated[i] = i%2 == 0
- pb.RepeatedField[i] = igtf
- }
-
- // Marshal.
- buf, _ := Marshal(pb)
-
- // Now test Unmarshal by recreating the original buffer.
- pbd := new(GoTest)
- Unmarshal(buf, pbd)
-
- // Check the checkable values
- for i := uint64(0); i < N; i++ {
- if pbd.Repeatedgroup[i] == nil { // TODO: more checking?
- t.Error("pbd.Repeatedgroup bad")
- }
- var x uint64
- x = uint64(pbd.F_Sint64Repeated[i])
- if x != i {
- t.Error("pbd.F_Sint64Repeated bad", x, i)
- }
- x = uint64(pbd.F_Sint32Repeated[i])
- if x != i {
- t.Error("pbd.F_Sint32Repeated bad", x, i)
- }
- s := fmt.Sprint(i)
- equalbytes(pbd.F_BytesRepeated[i], []byte(s), t)
- if pbd.F_StringRepeated[i] != s {
- t.Error("pbd.F_Sint32Repeated bad", pbd.F_StringRepeated[i], i)
- }
- x = uint64(pbd.F_DoubleRepeated[i])
- if x != i {
- t.Error("pbd.F_DoubleRepeated bad", x, i)
- }
- x = uint64(pbd.F_FloatRepeated[i])
- if x != i {
- t.Error("pbd.F_FloatRepeated bad", x, i)
- }
- x = pbd.F_Uint64Repeated[i]
- if x != i {
- t.Error("pbd.F_Uint64Repeated bad", x, i)
- }
- x = uint64(pbd.F_Uint32Repeated[i])
- if x != i {
- t.Error("pbd.F_Uint32Repeated bad", x, i)
- }
- x = pbd.F_Fixed64Repeated[i]
- if x != i {
- t.Error("pbd.F_Fixed64Repeated bad", x, i)
- }
- x = uint64(pbd.F_Fixed32Repeated[i])
- if x != i {
- t.Error("pbd.F_Fixed32Repeated bad", x, i)
- }
- x = uint64(pbd.F_Int64Repeated[i])
- if x != i {
- t.Error("pbd.F_Int64Repeated bad", x, i)
- }
- x = uint64(pbd.F_Int32Repeated[i])
- if x != i {
- t.Error("pbd.F_Int32Repeated bad", x, i)
- }
- if pbd.F_BoolRepeated[i] != (i%2 == 0) {
- t.Error("pbd.F_BoolRepeated bad", x, i)
- }
- if pbd.RepeatedField[i] == nil { // TODO: more checking?
- t.Error("pbd.RepeatedField bad")
- }
- }
-}
-
-// Verify we give a useful message when decoding to the wrong structure type.
-func TestTypeMismatch(t *testing.T) {
- pb1 := initGoTest(true)
-
- // Marshal
- o := old()
- o.Marshal(pb1)
-
- // Now Unmarshal it to the wrong type.
- pb2 := initGoTestField()
- err := o.Unmarshal(pb2)
- if err == nil {
- t.Error("expected error, got no error")
- } else if !strings.Contains(err.Error(), "bad wiretype") {
- t.Error("expected bad wiretype error, got", err)
- }
-}
-
-func encodeDecode(t *testing.T, in, out Message, msg string) {
- buf, err := Marshal(in)
- if err != nil {
- t.Fatalf("failed marshaling %v: %v", msg, err)
- }
- if err := Unmarshal(buf, out); err != nil {
- t.Fatalf("failed unmarshaling %v: %v", msg, err)
- }
-}
-
-func TestPackedNonPackedDecoderSwitching(t *testing.T) {
- np, p := new(NonPackedTest), new(PackedTest)
-
- // non-packed -> packed
- np.A = []int32{0, 1, 1, 2, 3, 5}
- encodeDecode(t, np, p, "non-packed -> packed")
- if !reflect.DeepEqual(np.A, p.B) {
- t.Errorf("failed non-packed -> packed; np.A=%+v, p.B=%+v", np.A, p.B)
- }
-
- // packed -> non-packed
- np.Reset()
- p.B = []int32{3, 1, 4, 1, 5, 9}
- encodeDecode(t, p, np, "packed -> non-packed")
- if !reflect.DeepEqual(p.B, np.A) {
- t.Errorf("failed packed -> non-packed; p.B=%+v, np.A=%+v", p.B, np.A)
- }
-}
-
-func TestProto1RepeatedGroup(t *testing.T) {
- pb := &MessageList{
- Message: []*MessageList_Message{
- {
- Name: String("blah"),
- Count: Int32(7),
- },
- // NOTE: pb.Message[1] is a nil
- nil,
- },
- }
-
- o := old()
- err := o.Marshal(pb)
- if err == nil || !strings.Contains(err.Error(), "repeated field Message has nil") {
- t.Fatalf("unexpected or no error when marshaling: %v", err)
- }
-}
-
-// Test that enums work. Checks for a bug introduced by making enums
-// named types instead of int32: newInt32FromUint64 would crash with
-// a type mismatch in reflect.PointTo.
-func TestEnum(t *testing.T) {
- pb := new(GoEnum)
- pb.Foo = FOO_FOO1.Enum()
- o := old()
- if err := o.Marshal(pb); err != nil {
- t.Fatal("error encoding enum:", err)
- }
- pb1 := new(GoEnum)
- if err := o.Unmarshal(pb1); err != nil {
- t.Fatal("error decoding enum:", err)
- }
- if *pb1.Foo != FOO_FOO1 {
- t.Error("expected 7 but got ", *pb1.Foo)
- }
-}
-
-// Enum types have String methods. Check that enum fields can be printed.
-// We don't care what the value actually is, just as long as it doesn't crash.
-func TestPrintingNilEnumFields(t *testing.T) {
- pb := new(GoEnum)
- _ = fmt.Sprintf("%+v", pb)
-}
-
-// Verify that absent required fields cause Marshal/Unmarshal to return errors.
-func TestRequiredFieldEnforcement(t *testing.T) {
- pb := new(GoTestField)
- _, err := Marshal(pb)
- if err == nil {
- t.Error("marshal: expected error, got nil")
- } else if _, ok := err.(*RequiredNotSetError); !ok || !strings.Contains(err.Error(), "Label") {
- t.Errorf("marshal: bad error type: %v", err)
- }
-
- // A slightly sneaky, yet valid, proto. It encodes the same required field twice,
- // so simply counting the required fields is insufficient.
- // field 1, encoding 2, value "hi"
- buf := []byte("\x0A\x02hi\x0A\x02hi")
- err = Unmarshal(buf, pb)
- if err == nil {
- t.Error("unmarshal: expected error, got nil")
- } else if _, ok := err.(*RequiredNotSetError); !ok || !strings.Contains(err.Error(), "{Unknown}") {
- t.Errorf("unmarshal: bad error type: %v", err)
- }
-}
-
-// Verify that absent required fields in groups cause Marshal/Unmarshal to return errors.
-func TestRequiredFieldEnforcementGroups(t *testing.T) {
- pb := &GoTestRequiredGroupField{Group: &GoTestRequiredGroupField_Group{}}
- if _, err := Marshal(pb); err == nil {
- t.Error("marshal: expected error, got nil")
- } else if _, ok := err.(*RequiredNotSetError); !ok || !strings.Contains(err.Error(), "Group.Field") {
- t.Errorf("marshal: bad error type: %v", err)
- }
-
- buf := []byte{11, 12}
- if err := Unmarshal(buf, pb); err == nil {
- t.Error("unmarshal: expected error, got nil")
- } else if _, ok := err.(*RequiredNotSetError); !ok || !strings.Contains(err.Error(), "Group.{Unknown}") {
- t.Errorf("unmarshal: bad error type: %v", err)
- }
-}
-
-func TestTypedNilMarshal(t *testing.T) {
- // A typed nil should return ErrNil and not crash.
- {
- var m *GoEnum
- if _, err := Marshal(m); err != ErrNil {
- t.Errorf("Marshal(%#v): got %v, want ErrNil", m, err)
- }
- }
-
- {
- m := &Communique{Union: &Communique_Msg{nil}}
- if _, err := Marshal(m); err == nil || err == ErrNil {
- t.Errorf("Marshal(%#v): got %v, want errOneofHasNil", m, err)
- }
- }
-}
-
-// A type that implements the Marshaler interface, but is not nillable.
-type nonNillableInt uint64
-
-func (nni nonNillableInt) Marshal() ([]byte, error) {
- return EncodeVarint(uint64(nni)), nil
-}
-
-type NNIMessage struct {
- nni nonNillableInt
-}
-
-func (*NNIMessage) Reset() {}
-func (*NNIMessage) String() string { return "" }
-func (*NNIMessage) ProtoMessage() {}
-
-// A type that implements the Marshaler interface and is nillable.
-type nillableMessage struct {
- x uint64
-}
-
-func (nm *nillableMessage) Marshal() ([]byte, error) {
- return EncodeVarint(nm.x), nil
-}
-
-type NMMessage struct {
- nm *nillableMessage
-}
-
-func (*NMMessage) Reset() {}
-func (*NMMessage) String() string { return "" }
-func (*NMMessage) ProtoMessage() {}
-
-// Verify a type that uses the Marshaler interface, but has a nil pointer.
-func TestNilMarshaler(t *testing.T) {
- // Try a struct with a Marshaler field that is nil.
- // It should be directly marshable.
- nmm := new(NMMessage)
- if _, err := Marshal(nmm); err != nil {
- t.Error("unexpected error marshaling nmm: ", err)
- }
-
- // Try a struct with a Marshaler field that is not nillable.
- nnim := new(NNIMessage)
- nnim.nni = 7
- var _ Marshaler = nnim.nni // verify it is truly a Marshaler
- if _, err := Marshal(nnim); err != nil {
- t.Error("unexpected error marshaling nnim: ", err)
- }
-}
-
-func TestAllSetDefaults(t *testing.T) {
- // Exercise SetDefaults with all scalar field types.
- m := &Defaults{
- // NaN != NaN, so override that here.
- F_Nan: Float32(1.7),
- }
- expected := &Defaults{
- F_Bool: Bool(true),
- F_Int32: Int32(32),
- F_Int64: Int64(64),
- F_Fixed32: Uint32(320),
- F_Fixed64: Uint64(640),
- F_Uint32: Uint32(3200),
- F_Uint64: Uint64(6400),
- F_Float: Float32(314159),
- F_Double: Float64(271828),
- F_String: String(`hello, "world!"` + "\n"),
- F_Bytes: []byte("Bignose"),
- F_Sint32: Int32(-32),
- F_Sint64: Int64(-64),
- F_Enum: Defaults_GREEN.Enum(),
- F_Pinf: Float32(float32(math.Inf(1))),
- F_Ninf: Float32(float32(math.Inf(-1))),
- F_Nan: Float32(1.7),
- StrZero: String(""),
- }
- SetDefaults(m)
- if !Equal(m, expected) {
- t.Errorf("SetDefaults failed\n got %v\nwant %v", m, expected)
- }
-}
-
-func TestSetDefaultsWithSetField(t *testing.T) {
- // Check that a set value is not overridden.
- m := &Defaults{
- F_Int32: Int32(12),
- }
- SetDefaults(m)
- if v := m.GetF_Int32(); v != 12 {
- t.Errorf("m.FInt32 = %v, want 12", v)
- }
-}
-
-func TestSetDefaultsWithSubMessage(t *testing.T) {
- m := &OtherMessage{
- Key: Int64(123),
- Inner: &InnerMessage{
- Host: String("gopher"),
- },
- }
- expected := &OtherMessage{
- Key: Int64(123),
- Inner: &InnerMessage{
- Host: String("gopher"),
- Port: Int32(4000),
- },
- }
- SetDefaults(m)
- if !Equal(m, expected) {
- t.Errorf("\n got %v\nwant %v", m, expected)
- }
-}
-
-func TestSetDefaultsWithRepeatedSubMessage(t *testing.T) {
- m := &MyMessage{
- RepInner: []*InnerMessage{{}},
- }
- expected := &MyMessage{
- RepInner: []*InnerMessage{{
- Port: Int32(4000),
- }},
- }
- SetDefaults(m)
- if !Equal(m, expected) {
- t.Errorf("\n got %v\nwant %v", m, expected)
- }
-}
-
-func TestSetDefaultWithRepeatedNonMessage(t *testing.T) {
- m := &MyMessage{
- Pet: []string{"turtle", "wombat"},
- }
- expected := Clone(m)
- SetDefaults(m)
- if !Equal(m, expected) {
- t.Errorf("\n got %v\nwant %v", m, expected)
- }
-}
-
-func TestMaximumTagNumber(t *testing.T) {
- m := &MaxTag{
- LastField: String("natural goat essence"),
- }
- buf, err := Marshal(m)
- if err != nil {
- t.Fatalf("proto.Marshal failed: %v", err)
- }
- m2 := new(MaxTag)
- if err := Unmarshal(buf, m2); err != nil {
- t.Fatalf("proto.Unmarshal failed: %v", err)
- }
- if got, want := m2.GetLastField(), *m.LastField; got != want {
- t.Errorf("got %q, want %q", got, want)
- }
-}
-
-func TestJSON(t *testing.T) {
- m := &MyMessage{
- Count: Int32(4),
- Pet: []string{"bunny", "kitty"},
- Inner: &InnerMessage{
- Host: String("cauchy"),
- },
- Bikeshed: MyMessage_GREEN.Enum(),
- }
- const expected = `{"count":4,"pet":["bunny","kitty"],"inner":{"host":"cauchy"},"bikeshed":1}`
-
- b, err := json.Marshal(m)
- if err != nil {
- t.Fatalf("json.Marshal failed: %v", err)
- }
- s := string(b)
- if s != expected {
- t.Errorf("got %s\nwant %s", s, expected)
- }
-
- received := new(MyMessage)
- if err := json.Unmarshal(b, received); err != nil {
- t.Fatalf("json.Unmarshal failed: %v", err)
- }
- if !Equal(received, m) {
- t.Fatalf("got %s, want %s", received, m)
- }
-
- // Test unmarshalling of JSON with symbolic enum name.
- const old = `{"count":4,"pet":["bunny","kitty"],"inner":{"host":"cauchy"},"bikeshed":"GREEN"}`
- received.Reset()
- if err := json.Unmarshal([]byte(old), received); err != nil {
- t.Fatalf("json.Unmarshal failed: %v", err)
- }
- if !Equal(received, m) {
- t.Fatalf("got %s, want %s", received, m)
- }
-}
-
-func TestBadWireType(t *testing.T) {
- b := []byte{7<<3 | 6} // field 7, wire type 6
- pb := new(OtherMessage)
- if err := Unmarshal(b, pb); err == nil {
- t.Errorf("Unmarshal did not fail")
- } else if !strings.Contains(err.Error(), "unknown wire type") {
- t.Errorf("wrong error: %v", err)
- }
-}
-
-func TestBytesWithInvalidLength(t *testing.T) {
- // If a byte sequence has an invalid (negative) length, Unmarshal should not panic.
- b := []byte{2<<3 | WireBytes, 0xff, 0xff, 0xff, 0xff, 0xff, 0}
- Unmarshal(b, new(MyMessage))
-}
-
-func TestLengthOverflow(t *testing.T) {
- // Overflowing a length should not panic.
- b := []byte{2<<3 | WireBytes, 1, 1, 3<<3 | WireBytes, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, 0x01}
- Unmarshal(b, new(MyMessage))
-}
-
-func TestVarintOverflow(t *testing.T) {
- // Overflowing a 64-bit length should not be allowed.
- b := []byte{1<<3 | WireVarint, 0x01, 3<<3 | WireBytes, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x01}
- if err := Unmarshal(b, new(MyMessage)); err == nil {
- t.Fatalf("Overflowed uint64 length without error")
- }
-}
-
-func TestUnmarshalFuzz(t *testing.T) {
- const N = 1000
- seed := time.Now().UnixNano()
- t.Logf("RNG seed is %d", seed)
- rng := rand.New(rand.NewSource(seed))
- buf := make([]byte, 20)
- for i := 0; i < N; i++ {
- for j := range buf {
- buf[j] = byte(rng.Intn(256))
- }
- fuzzUnmarshal(t, buf)
- }
-}
-
-func TestMergeMessages(t *testing.T) {
- pb := &MessageList{Message: []*MessageList_Message{{Name: String("x"), Count: Int32(1)}}}
- data, err := Marshal(pb)
- if err != nil {
- t.Fatalf("Marshal: %v", err)
- }
-
- pb1 := new(MessageList)
- if err := Unmarshal(data, pb1); err != nil {
- t.Fatalf("first Unmarshal: %v", err)
- }
- if err := Unmarshal(data, pb1); err != nil {
- t.Fatalf("second Unmarshal: %v", err)
- }
- if len(pb1.Message) != 1 {
- t.Errorf("two Unmarshals produced %d Messages, want 1", len(pb1.Message))
- }
-
- pb2 := new(MessageList)
- if err := UnmarshalMerge(data, pb2); err != nil {
- t.Fatalf("first UnmarshalMerge: %v", err)
- }
- if err := UnmarshalMerge(data, pb2); err != nil {
- t.Fatalf("second UnmarshalMerge: %v", err)
- }
- if len(pb2.Message) != 2 {
- t.Errorf("two UnmarshalMerges produced %d Messages, want 2", len(pb2.Message))
- }
-}
-
-func TestExtensionMarshalOrder(t *testing.T) {
- m := &MyMessage{Count: Int(123)}
- if err := SetExtension(m, E_Ext_More, &Ext{Data: String("alpha")}); err != nil {
- t.Fatalf("SetExtension: %v", err)
- }
- if err := SetExtension(m, E_Ext_Text, String("aleph")); err != nil {
- t.Fatalf("SetExtension: %v", err)
- }
- if err := SetExtension(m, E_Ext_Number, Int32(1)); err != nil {
- t.Fatalf("SetExtension: %v", err)
- }
-
- // Serialize m several times, and check we get the same bytes each time.
- var orig []byte
- for i := 0; i < 100; i++ {
- b, err := Marshal(m)
- if err != nil {
- t.Fatalf("Marshal: %v", err)
- }
- if i == 0 {
- orig = b
- continue
- }
- if !bytes.Equal(b, orig) {
- t.Errorf("Bytes differ on attempt #%d", i)
- }
- }
-}
-
-// Many extensions, because small maps might not iterate differently on each iteration.
-var exts = []*ExtensionDesc{
- E_X201,
- E_X202,
- E_X203,
- E_X204,
- E_X205,
- E_X206,
- E_X207,
- E_X208,
- E_X209,
- E_X210,
- E_X211,
- E_X212,
- E_X213,
- E_X214,
- E_X215,
- E_X216,
- E_X217,
- E_X218,
- E_X219,
- E_X220,
- E_X221,
- E_X222,
- E_X223,
- E_X224,
- E_X225,
- E_X226,
- E_X227,
- E_X228,
- E_X229,
- E_X230,
- E_X231,
- E_X232,
- E_X233,
- E_X234,
- E_X235,
- E_X236,
- E_X237,
- E_X238,
- E_X239,
- E_X240,
- E_X241,
- E_X242,
- E_X243,
- E_X244,
- E_X245,
- E_X246,
- E_X247,
- E_X248,
- E_X249,
- E_X250,
-}
-
-func TestMessageSetMarshalOrder(t *testing.T) {
- m := &MyMessageSet{}
- for _, x := range exts {
- if err := SetExtension(m, x, &Empty{}); err != nil {
- t.Fatalf("SetExtension: %v", err)
- }
- }
-
- buf, err := Marshal(m)
- if err != nil {
- t.Fatalf("Marshal: %v", err)
- }
-
- // Serialize m several times, and check we get the same bytes each time.
- for i := 0; i < 10; i++ {
- b1, err := Marshal(m)
- if err != nil {
- t.Fatalf("Marshal: %v", err)
- }
- if !bytes.Equal(b1, buf) {
- t.Errorf("Bytes differ on re-Marshal #%d", i)
- }
-
- m2 := &MyMessageSet{}
- if err := Unmarshal(buf, m2); err != nil {
- t.Errorf("Unmarshal: %v", err)
- }
- b2, err := Marshal(m2)
- if err != nil {
- t.Errorf("re-Marshal: %v", err)
- }
- if !bytes.Equal(b2, buf) {
- t.Errorf("Bytes differ on round-trip #%d", i)
- }
- }
-}
-
-func TestUnmarshalMergesMessages(t *testing.T) {
- // If a nested message occurs twice in the input,
- // the fields should be merged when decoding.
- a := &OtherMessage{
- Key: Int64(123),
- Inner: &InnerMessage{
- Host: String("polhode"),
- Port: Int32(1234),
- },
- }
- aData, err := Marshal(a)
- if err != nil {
- t.Fatalf("Marshal(a): %v", err)
- }
- b := &OtherMessage{
- Weight: Float32(1.2),
- Inner: &InnerMessage{
- Host: String("herpolhode"),
- Connected: Bool(true),
- },
- }
- bData, err := Marshal(b)
- if err != nil {
- t.Fatalf("Marshal(b): %v", err)
- }
- want := &OtherMessage{
- Key: Int64(123),
- Weight: Float32(1.2),
- Inner: &InnerMessage{
- Host: String("herpolhode"),
- Port: Int32(1234),
- Connected: Bool(true),
- },
- }
- got := new(OtherMessage)
- if err := Unmarshal(append(aData, bData...), got); err != nil {
- t.Fatalf("Unmarshal: %v", err)
- }
- if !Equal(got, want) {
- t.Errorf("\n got %v\nwant %v", got, want)
- }
-}
-
-func TestEncodingSizes(t *testing.T) {
- tests := []struct {
- m Message
- n int
- }{
- {&Defaults{F_Int32: Int32(math.MaxInt32)}, 6},
- {&Defaults{F_Int32: Int32(math.MinInt32)}, 11},
- {&Defaults{F_Uint32: Uint32(uint32(math.MaxInt32) + 1)}, 6},
- {&Defaults{F_Uint32: Uint32(math.MaxUint32)}, 6},
- }
- for _, test := range tests {
- b, err := Marshal(test.m)
- if err != nil {
- t.Errorf("Marshal(%v): %v", test.m, err)
- continue
- }
- if len(b) != test.n {
- t.Errorf("Marshal(%v) yielded %d bytes, want %d bytes", test.m, len(b), test.n)
- }
- }
-}
-
-func TestRequiredNotSetError(t *testing.T) {
- pb := initGoTest(false)
- pb.RequiredField.Label = nil
- pb.F_Int32Required = nil
- pb.F_Int64Required = nil
-
- expected := "0807" + // field 1, encoding 0, value 7
- "2206" + "120474797065" + // field 4, encoding 2 (GoTestField)
- "5001" + // field 10, encoding 0, value 1
- "6d20000000" + // field 13, encoding 5, value 0x20
- "714000000000000000" + // field 14, encoding 1, value 0x40
- "78a019" + // field 15, encoding 0, value 0xca0 = 3232
- "8001c032" + // field 16, encoding 0, value 0x1940 = 6464
- "8d0100004a45" + // field 17, encoding 5, value 3232.0
- "9101000000000040b940" + // field 18, encoding 1, value 6464.0
- "9a0106" + "737472696e67" + // field 19, encoding 2, string "string"
- "b304" + // field 70, encoding 3, start group
- "ba0408" + "7265717569726564" + // field 71, encoding 2, string "required"
- "b404" + // field 70, encoding 4, end group
- "aa0605" + "6279746573" + // field 101, encoding 2, string "bytes"
- "b0063f" + // field 102, encoding 0, 0x3f zigzag32
- "b8067f" // field 103, encoding 0, 0x7f zigzag64
-
- o := old()
- bytes, err := Marshal(pb)
- if _, ok := err.(*RequiredNotSetError); !ok {
- fmt.Printf("marshal-1 err = %v, want *RequiredNotSetError", err)
- o.DebugPrint("", bytes)
- t.Fatalf("expected = %s", expected)
- }
- if strings.Index(err.Error(), "RequiredField.Label") < 0 {
- t.Errorf("marshal-1 wrong err msg: %v", err)
- }
- if !equal(bytes, expected, t) {
- o.DebugPrint("neq 1", bytes)
- t.Fatalf("expected = %s", expected)
- }
-
- // Now test Unmarshal by recreating the original buffer.
- pbd := new(GoTest)
- err = Unmarshal(bytes, pbd)
- if _, ok := err.(*RequiredNotSetError); !ok {
- t.Fatalf("unmarshal err = %v, want *RequiredNotSetError", err)
- o.DebugPrint("", bytes)
- t.Fatalf("string = %s", expected)
- }
- if strings.Index(err.Error(), "RequiredField.{Unknown}") < 0 {
- t.Errorf("unmarshal wrong err msg: %v", err)
- }
- bytes, err = Marshal(pbd)
- if _, ok := err.(*RequiredNotSetError); !ok {
- t.Errorf("marshal-2 err = %v, want *RequiredNotSetError", err)
- o.DebugPrint("", bytes)
- t.Fatalf("string = %s", expected)
- }
- if strings.Index(err.Error(), "RequiredField.Label") < 0 {
- t.Errorf("marshal-2 wrong err msg: %v", err)
- }
- if !equal(bytes, expected, t) {
- o.DebugPrint("neq 2", bytes)
- t.Fatalf("string = %s", expected)
- }
-}
-
-func fuzzUnmarshal(t *testing.T, data []byte) {
- defer func() {
- if e := recover(); e != nil {
- t.Errorf("These bytes caused a panic: %+v", data)
- t.Logf("Stack:\n%s", debug.Stack())
- t.FailNow()
- }
- }()
-
- pb := new(MyMessage)
- Unmarshal(data, pb)
-}
-
-func TestMapFieldMarshal(t *testing.T) {
- m := &MessageWithMap{
- NameMapping: map[int32]string{
- 1: "Rob",
- 4: "Ian",
- 8: "Dave",
- },
- }
- b, err := Marshal(m)
- if err != nil {
- t.Fatalf("Marshal: %v", err)
- }
-
- // b should be the concatenation of these three byte sequences in some order.
- parts := []string{
- "\n\a\b\x01\x12\x03Rob",
- "\n\a\b\x04\x12\x03Ian",
- "\n\b\b\x08\x12\x04Dave",
- }
- ok := false
- for i := range parts {
- for j := range parts {
- if j == i {
- continue
- }
- for k := range parts {
- if k == i || k == j {
- continue
- }
- try := parts[i] + parts[j] + parts[k]
- if bytes.Equal(b, []byte(try)) {
- ok = true
- break
- }
- }
- }
- }
- if !ok {
- t.Fatalf("Incorrect Marshal output.\n got %q\nwant %q (or a permutation of that)", b, parts[0]+parts[1]+parts[2])
- }
- t.Logf("FYI b: %q", b)
-
- (new(Buffer)).DebugPrint("Dump of b", b)
-}
-
-func TestMapFieldRoundTrips(t *testing.T) {
- m := &MessageWithMap{
- NameMapping: map[int32]string{
- 1: "Rob",
- 4: "Ian",
- 8: "Dave",
- },
- MsgMapping: map[int64]*FloatingPoint{
- 0x7001: &FloatingPoint{F: Float64(2.0)},
- },
- ByteMapping: map[bool][]byte{
- false: []byte("that's not right!"),
- true: []byte("aye, 'tis true!"),
- },
- }
- b, err := Marshal(m)
- if err != nil {
- t.Fatalf("Marshal: %v", err)
- }
- t.Logf("FYI b: %q", b)
- m2 := new(MessageWithMap)
- if err := Unmarshal(b, m2); err != nil {
- t.Fatalf("Unmarshal: %v", err)
- }
- for _, pair := range [][2]interface{}{
- {m.NameMapping, m2.NameMapping},
- {m.MsgMapping, m2.MsgMapping},
- {m.ByteMapping, m2.ByteMapping},
- } {
- if !reflect.DeepEqual(pair[0], pair[1]) {
- t.Errorf("Map did not survive a round trip.\ninitial: %v\n final: %v", pair[0], pair[1])
- }
- }
-}
-
-func TestMapFieldWithNil(t *testing.T) {
- m1 := &MessageWithMap{
- MsgMapping: map[int64]*FloatingPoint{
- 1: nil,
- },
- }
- b, err := Marshal(m1)
- if err != nil {
- t.Fatalf("Marshal: %v", err)
- }
- m2 := new(MessageWithMap)
- if err := Unmarshal(b, m2); err != nil {
- t.Fatalf("Unmarshal: %v, got these bytes: %v", err, b)
- }
- if v, ok := m2.MsgMapping[1]; !ok {
- t.Error("msg_mapping[1] not present")
- } else if v != nil {
- t.Errorf("msg_mapping[1] not nil: %v", v)
- }
-}
-
-func TestMapFieldWithNilBytes(t *testing.T) {
- m1 := &MessageWithMap{
- ByteMapping: map[bool][]byte{
- false: []byte{},
- true: nil,
- },
- }
- n := Size(m1)
- b, err := Marshal(m1)
- if err != nil {
- t.Fatalf("Marshal: %v", err)
- }
- if n != len(b) {
- t.Errorf("Size(m1) = %d; want len(Marshal(m1)) = %d", n, len(b))
- }
- m2 := new(MessageWithMap)
- if err := Unmarshal(b, m2); err != nil {
- t.Fatalf("Unmarshal: %v, got these bytes: %v", err, b)
- }
- if v, ok := m2.ByteMapping[false]; !ok {
- t.Error("byte_mapping[false] not present")
- } else if len(v) != 0 {
- t.Errorf("byte_mapping[false] not empty: %#v", v)
- }
- if v, ok := m2.ByteMapping[true]; !ok {
- t.Error("byte_mapping[true] not present")
- } else if len(v) != 0 {
- t.Errorf("byte_mapping[true] not empty: %#v", v)
- }
-}
-
-func TestDecodeMapFieldMissingKey(t *testing.T) {
- b := []byte{
- 0x0A, 0x03, // message, tag 1 (name_mapping), of length 3 bytes
- // no key
- 0x12, 0x01, 0x6D, // string value of length 1 byte, value "m"
- }
- got := &MessageWithMap{}
- err := Unmarshal(b, got)
- if err != nil {
- t.Fatalf("failed to marshal map with missing key: %v", err)
- }
- want := &MessageWithMap{NameMapping: map[int32]string{0: "m"}}
- if !Equal(got, want) {
- t.Errorf("Unmarshaled map with no key was not as expected. got: %v, want %v", got, want)
- }
-}
-
-func TestDecodeMapFieldMissingValue(t *testing.T) {
- b := []byte{
- 0x0A, 0x02, // message, tag 1 (name_mapping), of length 2 bytes
- 0x08, 0x01, // varint key, value 1
- // no value
- }
- got := &MessageWithMap{}
- err := Unmarshal(b, got)
- if err != nil {
- t.Fatalf("failed to marshal map with missing value: %v", err)
- }
- want := &MessageWithMap{NameMapping: map[int32]string{1: ""}}
- if !Equal(got, want) {
- t.Errorf("Unmarshaled map with no value was not as expected. got: %v, want %v", got, want)
- }
-}
-
-func TestOneof(t *testing.T) {
- m := &Communique{}
- b, err := Marshal(m)
- if err != nil {
- t.Fatalf("Marshal of empty message with oneof: %v", err)
- }
- if len(b) != 0 {
- t.Errorf("Marshal of empty message yielded too many bytes: %v", b)
- }
-
- m = &Communique{
- Union: &Communique_Name{"Barry"},
- }
-
- // Round-trip.
- b, err = Marshal(m)
- if err != nil {
- t.Fatalf("Marshal of message with oneof: %v", err)
- }
- if len(b) != 7 { // name tag/wire (1) + name len (1) + name (5)
- t.Errorf("Incorrect marshal of message with oneof: %v", b)
- }
- m.Reset()
- if err := Unmarshal(b, m); err != nil {
- t.Fatalf("Unmarshal of message with oneof: %v", err)
- }
- if x, ok := m.Union.(*Communique_Name); !ok || x.Name != "Barry" {
- t.Errorf("After round trip, Union = %+v", m.Union)
- }
- if name := m.GetName(); name != "Barry" {
- t.Errorf("After round trip, GetName = %q, want %q", name, "Barry")
- }
-
- // Let's try with a message in the oneof.
- m.Union = &Communique_Msg{&Strings{StringField: String("deep deep string")}}
- b, err = Marshal(m)
- if err != nil {
- t.Fatalf("Marshal of message with oneof set to message: %v", err)
- }
- if len(b) != 20 { // msg tag/wire (1) + msg len (1) + msg (1 + 1 + 16)
- t.Errorf("Incorrect marshal of message with oneof set to message: %v", b)
- }
- m.Reset()
- if err := Unmarshal(b, m); err != nil {
- t.Fatalf("Unmarshal of message with oneof set to message: %v", err)
- }
- ss, ok := m.Union.(*Communique_Msg)
- if !ok || ss.Msg.GetStringField() != "deep deep string" {
- t.Errorf("After round trip with oneof set to message, Union = %+v", m.Union)
- }
-}
-
-func TestInefficientPackedBool(t *testing.T) {
- // https://github.com/golang/protobuf/issues/76
- inp := []byte{
- 0x12, 0x02, // 0x12 = 2<<3|2; 2 bytes
- // Usually a bool should take a single byte,
- // but it is permitted to be any varint.
- 0xb9, 0x30,
- }
- if err := Unmarshal(inp, new(MoreRepeated)); err != nil {
- t.Error(err)
- }
-}
-
-// Benchmarks
-
-func testMsg() *GoTest {
- pb := initGoTest(true)
- const N = 1000 // Internally the library starts much smaller.
- pb.F_Int32Repeated = make([]int32, N)
- pb.F_DoubleRepeated = make([]float64, N)
- for i := 0; i < N; i++ {
- pb.F_Int32Repeated[i] = int32(i)
- pb.F_DoubleRepeated[i] = float64(i)
- }
- return pb
-}
-
-func bytesMsg() *GoTest {
- pb := initGoTest(true)
- buf := make([]byte, 4000)
- for i := range buf {
- buf[i] = byte(i)
- }
- pb.F_BytesDefaulted = buf
- return pb
-}
-
-func benchmarkMarshal(b *testing.B, pb Message, marshal func(Message) ([]byte, error)) {
- d, _ := marshal(pb)
- b.SetBytes(int64(len(d)))
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- marshal(pb)
- }
-}
-
-func benchmarkBufferMarshal(b *testing.B, pb Message) {
- p := NewBuffer(nil)
- benchmarkMarshal(b, pb, func(pb0 Message) ([]byte, error) {
- p.Reset()
- err := p.Marshal(pb0)
- return p.Bytes(), err
- })
-}
-
-func benchmarkSize(b *testing.B, pb Message) {
- benchmarkMarshal(b, pb, func(pb0 Message) ([]byte, error) {
- Size(pb)
- return nil, nil
- })
-}
-
-func newOf(pb Message) Message {
- in := reflect.ValueOf(pb)
- if in.IsNil() {
- return pb
- }
- return reflect.New(in.Type().Elem()).Interface().(Message)
-}
-
-func benchmarkUnmarshal(b *testing.B, pb Message, unmarshal func([]byte, Message) error) {
- d, _ := Marshal(pb)
- b.SetBytes(int64(len(d)))
- pbd := newOf(pb)
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- unmarshal(d, pbd)
- }
-}
-
-func benchmarkBufferUnmarshal(b *testing.B, pb Message) {
- p := NewBuffer(nil)
- benchmarkUnmarshal(b, pb, func(d []byte, pb0 Message) error {
- p.SetBuf(d)
- return p.Unmarshal(pb0)
- })
-}
-
-// Benchmark{Marshal,BufferMarshal,Size,Unmarshal,BufferUnmarshal}{,Bytes}
-
-func BenchmarkMarshal(b *testing.B) {
- benchmarkMarshal(b, testMsg(), Marshal)
-}
-
-func BenchmarkBufferMarshal(b *testing.B) {
- benchmarkBufferMarshal(b, testMsg())
-}
-
-func BenchmarkSize(b *testing.B) {
- benchmarkSize(b, testMsg())
-}
-
-func BenchmarkUnmarshal(b *testing.B) {
- benchmarkUnmarshal(b, testMsg(), Unmarshal)
-}
-
-func BenchmarkBufferUnmarshal(b *testing.B) {
- benchmarkBufferUnmarshal(b, testMsg())
-}
-
-func BenchmarkMarshalBytes(b *testing.B) {
- benchmarkMarshal(b, bytesMsg(), Marshal)
-}
-
-func BenchmarkBufferMarshalBytes(b *testing.B) {
- benchmarkBufferMarshal(b, bytesMsg())
-}
-
-func BenchmarkSizeBytes(b *testing.B) {
- benchmarkSize(b, bytesMsg())
-}
-
-func BenchmarkUnmarshalBytes(b *testing.B) {
- benchmarkUnmarshal(b, bytesMsg(), Unmarshal)
-}
-
-func BenchmarkBufferUnmarshalBytes(b *testing.B) {
- benchmarkBufferUnmarshal(b, bytesMsg())
-}
-
-func BenchmarkUnmarshalUnrecognizedFields(b *testing.B) {
- b.StopTimer()
- pb := initGoTestField()
- skip := &GoSkipTest{
- SkipInt32: Int32(32),
- SkipFixed32: Uint32(3232),
- SkipFixed64: Uint64(6464),
- SkipString: String("skipper"),
- Skipgroup: &GoSkipTest_SkipGroup{
- GroupInt32: Int32(75),
- GroupString: String("wxyz"),
- },
- }
-
- pbd := new(GoTestField)
- p := NewBuffer(nil)
- p.Marshal(pb)
- p.Marshal(skip)
- p2 := NewBuffer(nil)
-
- b.StartTimer()
- for i := 0; i < b.N; i++ {
- p2.SetBuf(p.Bytes())
- p2.Unmarshal(pbd)
- }
-}
diff --git a/vendor/src/github.com/golang/protobuf/proto/any_test.go b/vendor/src/github.com/golang/protobuf/proto/any_test.go
deleted file mode 100644
index 1a3c22e..0000000
--- a/vendor/src/github.com/golang/protobuf/proto/any_test.go
+++ /dev/null
@@ -1,300 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2016 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto_test
-
-import (
- "strings"
- "testing"
-
- "github.com/golang/protobuf/proto"
-
- pb "github.com/golang/protobuf/proto/proto3_proto"
- testpb "github.com/golang/protobuf/proto/testdata"
- anypb "github.com/golang/protobuf/ptypes/any"
-)
-
-var (
- expandedMarshaler = proto.TextMarshaler{ExpandAny: true}
- expandedCompactMarshaler = proto.TextMarshaler{Compact: true, ExpandAny: true}
-)
-
-// anyEqual reports whether two messages which may be google.protobuf.Any or may
-// contain google.protobuf.Any fields are equal. We can't use proto.Equal for
-// comparison, because semantically equivalent messages may be marshaled to
-// binary in different tag order. Instead, trust that TextMarshaler with
-// ExpandAny option works and compare the text marshaling results.
-func anyEqual(got, want proto.Message) bool {
- // if messages are proto.Equal, no need to marshal.
- if proto.Equal(got, want) {
- return true
- }
- g := expandedMarshaler.Text(got)
- w := expandedMarshaler.Text(want)
- return g == w
-}
-
-type golden struct {
- m proto.Message
- t, c string
-}
-
-var goldenMessages = makeGolden()
-
-func makeGolden() []golden {
- nested := &pb.Nested{Bunny: "Monty"}
- nb, err := proto.Marshal(nested)
- if err != nil {
- panic(err)
- }
- m1 := &pb.Message{
- Name: "David",
- ResultCount: 47,
- Anything: &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(nested), Value: nb},
- }
- m2 := &pb.Message{
- Name: "David",
- ResultCount: 47,
- Anything: &anypb.Any{TypeUrl: "http://[::1]/type.googleapis.com/" + proto.MessageName(nested), Value: nb},
- }
- m3 := &pb.Message{
- Name: "David",
- ResultCount: 47,
- Anything: &anypb.Any{TypeUrl: `type.googleapis.com/"/` + proto.MessageName(nested), Value: nb},
- }
- m4 := &pb.Message{
- Name: "David",
- ResultCount: 47,
- Anything: &anypb.Any{TypeUrl: "type.googleapis.com/a/path/" + proto.MessageName(nested), Value: nb},
- }
- m5 := &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(nested), Value: nb}
-
- any1 := &testpb.MyMessage{Count: proto.Int32(47), Name: proto.String("David")}
- proto.SetExtension(any1, testpb.E_Ext_More, &testpb.Ext{Data: proto.String("foo")})
- proto.SetExtension(any1, testpb.E_Ext_Text, proto.String("bar"))
- any1b, err := proto.Marshal(any1)
- if err != nil {
- panic(err)
- }
- any2 := &testpb.MyMessage{Count: proto.Int32(42), Bikeshed: testpb.MyMessage_GREEN.Enum(), RepBytes: [][]byte{[]byte("roboto")}}
- proto.SetExtension(any2, testpb.E_Ext_More, &testpb.Ext{Data: proto.String("baz")})
- any2b, err := proto.Marshal(any2)
- if err != nil {
- panic(err)
- }
- m6 := &pb.Message{
- Name: "David",
- ResultCount: 47,
- Anything: &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(any1), Value: any1b},
- ManyThings: []*anypb.Any{
- &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(any2), Value: any2b},
- &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(any1), Value: any1b},
- },
- }
-
- const (
- m1Golden = `
-name: "David"
-result_count: 47
-anything: <
- [type.googleapis.com/proto3_proto.Nested]: <
- bunny: "Monty"
- >
->
-`
- m2Golden = `
-name: "David"
-result_count: 47
-anything: <
- ["http://[::1]/type.googleapis.com/proto3_proto.Nested"]: <
- bunny: "Monty"
- >
->
-`
- m3Golden = `
-name: "David"
-result_count: 47
-anything: <
- ["type.googleapis.com/\"/proto3_proto.Nested"]: <
- bunny: "Monty"
- >
->
-`
- m4Golden = `
-name: "David"
-result_count: 47
-anything: <
- [type.googleapis.com/a/path/proto3_proto.Nested]: <
- bunny: "Monty"
- >
->
-`
- m5Golden = `
-[type.googleapis.com/proto3_proto.Nested]: <
- bunny: "Monty"
->
-`
- m6Golden = `
-name: "David"
-result_count: 47
-anything: <
- [type.googleapis.com/testdata.MyMessage]: <
- count: 47
- name: "David"
- [testdata.Ext.more]: <
- data: "foo"
- >
- [testdata.Ext.text]: "bar"
- >
->
-many_things: <
- [type.googleapis.com/testdata.MyMessage]: <
- count: 42
- bikeshed: GREEN
- rep_bytes: "roboto"
- [testdata.Ext.more]: <
- data: "baz"
- >
- >
->
-many_things: <
- [type.googleapis.com/testdata.MyMessage]: <
- count: 47
- name: "David"
- [testdata.Ext.more]: <
- data: "foo"
- >
- [testdata.Ext.text]: "bar"
- >
->
-`
- )
- return []golden{
- {m1, strings.TrimSpace(m1Golden) + "\n", strings.TrimSpace(compact(m1Golden)) + " "},
- {m2, strings.TrimSpace(m2Golden) + "\n", strings.TrimSpace(compact(m2Golden)) + " "},
- {m3, strings.TrimSpace(m3Golden) + "\n", strings.TrimSpace(compact(m3Golden)) + " "},
- {m4, strings.TrimSpace(m4Golden) + "\n", strings.TrimSpace(compact(m4Golden)) + " "},
- {m5, strings.TrimSpace(m5Golden) + "\n", strings.TrimSpace(compact(m5Golden)) + " "},
- {m6, strings.TrimSpace(m6Golden) + "\n", strings.TrimSpace(compact(m6Golden)) + " "},
- }
-}
-
-func TestMarshalGolden(t *testing.T) {
- for _, tt := range goldenMessages {
- if got, want := expandedMarshaler.Text(tt.m), tt.t; got != want {
- t.Errorf("message %v: got:\n%s\nwant:\n%s", tt.m, got, want)
- }
- if got, want := expandedCompactMarshaler.Text(tt.m), tt.c; got != want {
- t.Errorf("message %v: got:\n`%s`\nwant:\n`%s`", tt.m, got, want)
- }
- }
-}
-
-func TestUnmarshalGolden(t *testing.T) {
- for _, tt := range goldenMessages {
- want := tt.m
- got := proto.Clone(tt.m)
- got.Reset()
- if err := proto.UnmarshalText(tt.t, got); err != nil {
- t.Errorf("failed to unmarshal\n%s\nerror: %v", tt.t, err)
- }
- if !anyEqual(got, want) {
- t.Errorf("message:\n%s\ngot:\n%s\nwant:\n%s", tt.t, got, want)
- }
- got.Reset()
- if err := proto.UnmarshalText(tt.c, got); err != nil {
- t.Errorf("failed to unmarshal\n%s\nerror: %v", tt.c, err)
- }
- if !anyEqual(got, want) {
- t.Errorf("message:\n%s\ngot:\n%s\nwant:\n%s", tt.c, got, want)
- }
- }
-}
-
-func TestMarshalUnknownAny(t *testing.T) {
- m := &pb.Message{
- Anything: &anypb.Any{
- TypeUrl: "foo",
- Value: []byte("bar"),
- },
- }
- want := `anything: <
- type_url: "foo"
- value: "bar"
->
-`
- got := expandedMarshaler.Text(m)
- if got != want {
- t.Errorf("got\n`%s`\nwant\n`%s`", got, want)
- }
-}
-
-func TestAmbiguousAny(t *testing.T) {
- pb := &anypb.Any{}
- err := proto.UnmarshalText(`
- type_url: "ttt/proto3_proto.Nested"
- value: "\n\x05Monty"
- `, pb)
- t.Logf("result: %v (error: %v)", expandedMarshaler.Text(pb), err)
- if err != nil {
- t.Errorf("failed to parse ambiguous Any message: %v", err)
- }
-}
-
-func TestUnmarshalOverwriteAny(t *testing.T) {
- pb := &anypb.Any{}
- err := proto.UnmarshalText(`
- [type.googleapis.com/a/path/proto3_proto.Nested]: <
- bunny: "Monty"
- >
- [type.googleapis.com/a/path/proto3_proto.Nested]: <
- bunny: "Rabbit of Caerbannog"
- >
- `, pb)
- want := `line 7: Any message unpacked multiple times, or "type_url" already set`
- if err.Error() != want {
- t.Errorf("incorrect error.\nHave: %v\nWant: %v", err.Error(), want)
- }
-}
-
-func TestUnmarshalAnyMixAndMatch(t *testing.T) {
- pb := &anypb.Any{}
- err := proto.UnmarshalText(`
- value: "\n\x05Monty"
- [type.googleapis.com/a/path/proto3_proto.Nested]: <
- bunny: "Rabbit of Caerbannog"
- >
- `, pb)
- want := `line 5: Any message unpacked multiple times, or "value" already set`
- if err.Error() != want {
- t.Errorf("incorrect error.\nHave: %v\nWant: %v", err.Error(), want)
- }
-}
diff --git a/vendor/src/github.com/golang/protobuf/proto/clone.go b/vendor/src/github.com/golang/protobuf/proto/clone.go
deleted file mode 100644
index e392575..0000000
--- a/vendor/src/github.com/golang/protobuf/proto/clone.go
+++ /dev/null
@@ -1,229 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2011 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Protocol buffer deep copy and merge.
-// TODO: RawMessage.
-
-package proto
-
-import (
- "log"
- "reflect"
- "strings"
-)
-
-// Clone returns a deep copy of a protocol buffer.
-func Clone(pb Message) Message {
- in := reflect.ValueOf(pb)
- if in.IsNil() {
- return pb
- }
-
- out := reflect.New(in.Type().Elem())
- // out is empty so a merge is a deep copy.
- mergeStruct(out.Elem(), in.Elem())
- return out.Interface().(Message)
-}
-
-// Merge merges src into dst.
-// Required and optional fields that are set in src will be set to that value in dst.
-// Elements of repeated fields will be appended.
-// Merge panics if src and dst are not the same type, or if dst is nil.
-func Merge(dst, src Message) {
- in := reflect.ValueOf(src)
- out := reflect.ValueOf(dst)
- if out.IsNil() {
- panic("proto: nil destination")
- }
- if in.Type() != out.Type() {
- // Explicit test prior to mergeStruct so that mistyped nils will fail
- panic("proto: type mismatch")
- }
- if in.IsNil() {
- // Merging nil into non-nil is a quiet no-op
- return
- }
- mergeStruct(out.Elem(), in.Elem())
-}
-
-func mergeStruct(out, in reflect.Value) {
- sprop := GetProperties(in.Type())
- for i := 0; i < in.NumField(); i++ {
- f := in.Type().Field(i)
- if strings.HasPrefix(f.Name, "XXX_") {
- continue
- }
- mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
- }
-
- if emIn, ok := extendable(in.Addr().Interface()); ok {
- emOut, _ := extendable(out.Addr().Interface())
- mIn, muIn := emIn.extensionsRead()
- if mIn != nil {
- mOut := emOut.extensionsWrite()
- muIn.Lock()
- mergeExtension(mOut, mIn)
- muIn.Unlock()
- }
- }
-
- uf := in.FieldByName("XXX_unrecognized")
- if !uf.IsValid() {
- return
- }
- uin := uf.Bytes()
- if len(uin) > 0 {
- out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
- }
-}
-
-// mergeAny performs a merge between two values of the same type.
-// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
-// prop is set if this is a struct field (it may be nil).
-func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
- if in.Type() == protoMessageType {
- if !in.IsNil() {
- if out.IsNil() {
- out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
- } else {
- Merge(out.Interface().(Message), in.Interface().(Message))
- }
- }
- return
- }
- switch in.Kind() {
- case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
- reflect.String, reflect.Uint32, reflect.Uint64:
- if !viaPtr && isProto3Zero(in) {
- return
- }
- out.Set(in)
- case reflect.Interface:
- // Probably a oneof field; copy non-nil values.
- if in.IsNil() {
- return
- }
- // Allocate destination if it is not set, or set to a different type.
- // Otherwise we will merge as normal.
- if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
- out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
- }
- mergeAny(out.Elem(), in.Elem(), false, nil)
- case reflect.Map:
- if in.Len() == 0 {
- return
- }
- if out.IsNil() {
- out.Set(reflect.MakeMap(in.Type()))
- }
- // For maps with value types of *T or []byte we need to deep copy each value.
- elemKind := in.Type().Elem().Kind()
- for _, key := range in.MapKeys() {
- var val reflect.Value
- switch elemKind {
- case reflect.Ptr:
- val = reflect.New(in.Type().Elem().Elem())
- mergeAny(val, in.MapIndex(key), false, nil)
- case reflect.Slice:
- val = in.MapIndex(key)
- val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
- default:
- val = in.MapIndex(key)
- }
- out.SetMapIndex(key, val)
- }
- case reflect.Ptr:
- if in.IsNil() {
- return
- }
- if out.IsNil() {
- out.Set(reflect.New(in.Elem().Type()))
- }
- mergeAny(out.Elem(), in.Elem(), true, nil)
- case reflect.Slice:
- if in.IsNil() {
- return
- }
- if in.Type().Elem().Kind() == reflect.Uint8 {
- // []byte is a scalar bytes field, not a repeated field.
-
- // Edge case: if this is in a proto3 message, a zero length
- // bytes field is considered the zero value, and should not
- // be merged.
- if prop != nil && prop.proto3 && in.Len() == 0 {
- return
- }
-
- // Make a deep copy.
- // Append to []byte{} instead of []byte(nil) so that we never end up
- // with a nil result.
- out.SetBytes(append([]byte{}, in.Bytes()...))
- return
- }
- n := in.Len()
- if out.IsNil() {
- out.Set(reflect.MakeSlice(in.Type(), 0, n))
- }
- switch in.Type().Elem().Kind() {
- case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
- reflect.String, reflect.Uint32, reflect.Uint64:
- out.Set(reflect.AppendSlice(out, in))
- default:
- for i := 0; i < n; i++ {
- x := reflect.Indirect(reflect.New(in.Type().Elem()))
- mergeAny(x, in.Index(i), false, nil)
- out.Set(reflect.Append(out, x))
- }
- }
- case reflect.Struct:
- mergeStruct(out, in)
- default:
- // unknown type, so not a protocol buffer
- log.Printf("proto: don't know how to copy %v", in)
- }
-}
-
-func mergeExtension(out, in map[int32]Extension) {
- for extNum, eIn := range in {
- eOut := Extension{desc: eIn.desc}
- if eIn.value != nil {
- v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
- mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
- eOut.value = v.Interface()
- }
- if eIn.enc != nil {
- eOut.enc = make([]byte, len(eIn.enc))
- copy(eOut.enc, eIn.enc)
- }
-
- out[extNum] = eOut
- }
-}
diff --git a/vendor/src/github.com/golang/protobuf/proto/clone_test.go b/vendor/src/github.com/golang/protobuf/proto/clone_test.go
deleted file mode 100644
index f607ff4..0000000
--- a/vendor/src/github.com/golang/protobuf/proto/clone_test.go
+++ /dev/null
@@ -1,300 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2011 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto_test
-
-import (
- "testing"
-
- "github.com/golang/protobuf/proto"
-
- proto3pb "github.com/golang/protobuf/proto/proto3_proto"
- pb "github.com/golang/protobuf/proto/testdata"
-)
-
-var cloneTestMessage = &pb.MyMessage{
- Count: proto.Int32(42),
- Name: proto.String("Dave"),
- Pet: []string{"bunny", "kitty", "horsey"},
- Inner: &pb.InnerMessage{
- Host: proto.String("niles"),
- Port: proto.Int32(9099),
- Connected: proto.Bool(true),
- },
- Others: []*pb.OtherMessage{
- {
- Value: []byte("some bytes"),
- },
- },
- Somegroup: &pb.MyMessage_SomeGroup{
- GroupField: proto.Int32(6),
- },
- RepBytes: [][]byte{[]byte("sham"), []byte("wow")},
-}
-
-func init() {
- ext := &pb.Ext{
- Data: proto.String("extension"),
- }
- if err := proto.SetExtension(cloneTestMessage, pb.E_Ext_More, ext); err != nil {
- panic("SetExtension: " + err.Error())
- }
-}
-
-func TestClone(t *testing.T) {
- m := proto.Clone(cloneTestMessage).(*pb.MyMessage)
- if !proto.Equal(m, cloneTestMessage) {
- t.Errorf("Clone(%v) = %v", cloneTestMessage, m)
- }
-
- // Verify it was a deep copy.
- *m.Inner.Port++
- if proto.Equal(m, cloneTestMessage) {
- t.Error("Mutating clone changed the original")
- }
- // Byte fields and repeated fields should be copied.
- if &m.Pet[0] == &cloneTestMessage.Pet[0] {
- t.Error("Pet: repeated field not copied")
- }
- if &m.Others[0] == &cloneTestMessage.Others[0] {
- t.Error("Others: repeated field not copied")
- }
- if &m.Others[0].Value[0] == &cloneTestMessage.Others[0].Value[0] {
- t.Error("Others[0].Value: bytes field not copied")
- }
- if &m.RepBytes[0] == &cloneTestMessage.RepBytes[0] {
- t.Error("RepBytes: repeated field not copied")
- }
- if &m.RepBytes[0][0] == &cloneTestMessage.RepBytes[0][0] {
- t.Error("RepBytes[0]: bytes field not copied")
- }
-}
-
-func TestCloneNil(t *testing.T) {
- var m *pb.MyMessage
- if c := proto.Clone(m); !proto.Equal(m, c) {
- t.Errorf("Clone(%v) = %v", m, c)
- }
-}
-
-var mergeTests = []struct {
- src, dst, want proto.Message
-}{
- {
- src: &pb.MyMessage{
- Count: proto.Int32(42),
- },
- dst: &pb.MyMessage{
- Name: proto.String("Dave"),
- },
- want: &pb.MyMessage{
- Count: proto.Int32(42),
- Name: proto.String("Dave"),
- },
- },
- {
- src: &pb.MyMessage{
- Inner: &pb.InnerMessage{
- Host: proto.String("hey"),
- Connected: proto.Bool(true),
- },
- Pet: []string{"horsey"},
- Others: []*pb.OtherMessage{
- {
- Value: []byte("some bytes"),
- },
- },
- },
- dst: &pb.MyMessage{
- Inner: &pb.InnerMessage{
- Host: proto.String("niles"),
- Port: proto.Int32(9099),
- },
- Pet: []string{"bunny", "kitty"},
- Others: []*pb.OtherMessage{
- {
- Key: proto.Int64(31415926535),
- },
- {
- // Explicitly test a src=nil field
- Inner: nil,
- },
- },
- },
- want: &pb.MyMessage{
- Inner: &pb.InnerMessage{
- Host: proto.String("hey"),
- Connected: proto.Bool(true),
- Port: proto.Int32(9099),
- },
- Pet: []string{"bunny", "kitty", "horsey"},
- Others: []*pb.OtherMessage{
- {
- Key: proto.Int64(31415926535),
- },
- {},
- {
- Value: []byte("some bytes"),
- },
- },
- },
- },
- {
- src: &pb.MyMessage{
- RepBytes: [][]byte{[]byte("wow")},
- },
- dst: &pb.MyMessage{
- Somegroup: &pb.MyMessage_SomeGroup{
- GroupField: proto.Int32(6),
- },
- RepBytes: [][]byte{[]byte("sham")},
- },
- want: &pb.MyMessage{
- Somegroup: &pb.MyMessage_SomeGroup{
- GroupField: proto.Int32(6),
- },
- RepBytes: [][]byte{[]byte("sham"), []byte("wow")},
- },
- },
- // Check that a scalar bytes field replaces rather than appends.
- {
- src: &pb.OtherMessage{Value: []byte("foo")},
- dst: &pb.OtherMessage{Value: []byte("bar")},
- want: &pb.OtherMessage{Value: []byte("foo")},
- },
- {
- src: &pb.MessageWithMap{
- NameMapping: map[int32]string{6: "Nigel"},
- MsgMapping: map[int64]*pb.FloatingPoint{
- 0x4001: &pb.FloatingPoint{F: proto.Float64(2.0)},
- 0x4002: &pb.FloatingPoint{
- F: proto.Float64(2.0),
- },
- },
- ByteMapping: map[bool][]byte{true: []byte("wowsa")},
- },
- dst: &pb.MessageWithMap{
- NameMapping: map[int32]string{
- 6: "Bruce", // should be overwritten
- 7: "Andrew",
- },
- MsgMapping: map[int64]*pb.FloatingPoint{
- 0x4002: &pb.FloatingPoint{
- F: proto.Float64(3.0),
- Exact: proto.Bool(true),
- }, // the entire message should be overwritten
- },
- },
- want: &pb.MessageWithMap{
- NameMapping: map[int32]string{
- 6: "Nigel",
- 7: "Andrew",
- },
- MsgMapping: map[int64]*pb.FloatingPoint{
- 0x4001: &pb.FloatingPoint{F: proto.Float64(2.0)},
- 0x4002: &pb.FloatingPoint{
- F: proto.Float64(2.0),
- },
- },
- ByteMapping: map[bool][]byte{true: []byte("wowsa")},
- },
- },
- // proto3 shouldn't merge zero values,
- // in the same way that proto2 shouldn't merge nils.
- {
- src: &proto3pb.Message{
- Name: "Aaron",
- Data: []byte(""), // zero value, but not nil
- },
- dst: &proto3pb.Message{
- HeightInCm: 176,
- Data: []byte("texas!"),
- },
- want: &proto3pb.Message{
- Name: "Aaron",
- HeightInCm: 176,
- Data: []byte("texas!"),
- },
- },
- // Oneof fields should merge by assignment.
- {
- src: &pb.Communique{
- Union: &pb.Communique_Number{41},
- },
- dst: &pb.Communique{
- Union: &pb.Communique_Name{"Bobby Tables"},
- },
- want: &pb.Communique{
- Union: &pb.Communique_Number{41},
- },
- },
- // Oneof nil is the same as not set.
- {
- src: &pb.Communique{},
- dst: &pb.Communique{
- Union: &pb.Communique_Name{"Bobby Tables"},
- },
- want: &pb.Communique{
- Union: &pb.Communique_Name{"Bobby Tables"},
- },
- },
- {
- src: &proto3pb.Message{
- Terrain: map[string]*proto3pb.Nested{
- "kay_a": &proto3pb.Nested{Cute: true}, // replace
- "kay_b": &proto3pb.Nested{Bunny: "rabbit"}, // insert
- },
- },
- dst: &proto3pb.Message{
- Terrain: map[string]*proto3pb.Nested{
- "kay_a": &proto3pb.Nested{Bunny: "lost"}, // replaced
- "kay_c": &proto3pb.Nested{Bunny: "bunny"}, // keep
- },
- },
- want: &proto3pb.Message{
- Terrain: map[string]*proto3pb.Nested{
- "kay_a": &proto3pb.Nested{Cute: true},
- "kay_b": &proto3pb.Nested{Bunny: "rabbit"},
- "kay_c": &proto3pb.Nested{Bunny: "bunny"},
- },
- },
- },
-}
-
-func TestMerge(t *testing.T) {
- for _, m := range mergeTests {
- got := proto.Clone(m.dst)
- proto.Merge(got, m.src)
- if !proto.Equal(got, m.want) {
- t.Errorf("Merge(%v, %v)\n got %v\nwant %v\n", m.dst, m.src, got, m.want)
- }
- }
-}
diff --git a/vendor/src/github.com/golang/protobuf/proto/decode.go b/vendor/src/github.com/golang/protobuf/proto/decode.go
deleted file mode 100644
index aa20729..0000000
--- a/vendor/src/github.com/golang/protobuf/proto/decode.go
+++ /dev/null
@@ -1,970 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-/*
- * Routines for decoding protocol buffer data to construct in-memory representations.
- */
-
-import (
- "errors"
- "fmt"
- "io"
- "os"
- "reflect"
-)
-
-// errOverflow is returned when an integer is too large to be represented.
-var errOverflow = errors.New("proto: integer overflow")
-
-// ErrInternalBadWireType is returned by generated code when an incorrect
-// wire type is encountered. It does not get returned to user code.
-var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
-
-// The fundamental decoders that interpret bytes on the wire.
-// Those that take integer types all return uint64 and are
-// therefore of type valueDecoder.
-
-// DecodeVarint reads a varint-encoded integer from the slice.
-// It returns the integer and the number of bytes consumed, or
-// zero if there is not enough.
-// This is the format for the
-// int32, int64, uint32, uint64, bool, and enum
-// protocol buffer types.
-func DecodeVarint(buf []byte) (x uint64, n int) {
- for shift := uint(0); shift < 64; shift += 7 {
- if n >= len(buf) {
- return 0, 0
- }
- b := uint64(buf[n])
- n++
- x |= (b & 0x7F) << shift
- if (b & 0x80) == 0 {
- return x, n
- }
- }
-
- // The number is too large to represent in a 64-bit value.
- return 0, 0
-}
-
-func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
- i := p.index
- l := len(p.buf)
-
- for shift := uint(0); shift < 64; shift += 7 {
- if i >= l {
- err = io.ErrUnexpectedEOF
- return
- }
- b := p.buf[i]
- i++
- x |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- p.index = i
- return
- }
- }
-
- // The number is too large to represent in a 64-bit value.
- err = errOverflow
- return
-}
-
-// DecodeVarint reads a varint-encoded integer from the Buffer.
-// This is the format for the
-// int32, int64, uint32, uint64, bool, and enum
-// protocol buffer types.
-func (p *Buffer) DecodeVarint() (x uint64, err error) {
- i := p.index
- buf := p.buf
-
- if i >= len(buf) {
- return 0, io.ErrUnexpectedEOF
- } else if buf[i] < 0x80 {
- p.index++
- return uint64(buf[i]), nil
- } else if len(buf)-i < 10 {
- return p.decodeVarintSlow()
- }
-
- var b uint64
- // we already checked the first byte
- x = uint64(buf[i]) - 0x80
- i++
-
- b = uint64(buf[i])
- i++
- x += b << 7
- if b&0x80 == 0 {
- goto done
- }
- x -= 0x80 << 7
-
- b = uint64(buf[i])
- i++
- x += b << 14
- if b&0x80 == 0 {
- goto done
- }
- x -= 0x80 << 14
-
- b = uint64(buf[i])
- i++
- x += b << 21
- if b&0x80 == 0 {
- goto done
- }
- x -= 0x80 << 21
-
- b = uint64(buf[i])
- i++
- x += b << 28
- if b&0x80 == 0 {
- goto done
- }
- x -= 0x80 << 28
-
- b = uint64(buf[i])
- i++
- x += b << 35
- if b&0x80 == 0 {
- goto done
- }
- x -= 0x80 << 35
-
- b = uint64(buf[i])
- i++
- x += b << 42
- if b&0x80 == 0 {
- goto done
- }
- x -= 0x80 << 42
-
- b = uint64(buf[i])
- i++
- x += b << 49
- if b&0x80 == 0 {
- goto done
- }
- x -= 0x80 << 49
-
- b = uint64(buf[i])
- i++
- x += b << 56
- if b&0x80 == 0 {
- goto done
- }
- x -= 0x80 << 56
-
- b = uint64(buf[i])
- i++
- x += b << 63
- if b&0x80 == 0 {
- goto done
- }
- // x -= 0x80 << 63 // Always zero.
-
- return 0, errOverflow
-
-done:
- p.index = i
- return x, nil
-}
-
-// DecodeFixed64 reads a 64-bit integer from the Buffer.
-// This is the format for the
-// fixed64, sfixed64, and double protocol buffer types.
-func (p *Buffer) DecodeFixed64() (x uint64, err error) {
- // x, err already 0
- i := p.index + 8
- if i < 0 || i > len(p.buf) {
- err = io.ErrUnexpectedEOF
- return
- }
- p.index = i
-
- x = uint64(p.buf[i-8])
- x |= uint64(p.buf[i-7]) << 8
- x |= uint64(p.buf[i-6]) << 16
- x |= uint64(p.buf[i-5]) << 24
- x |= uint64(p.buf[i-4]) << 32
- x |= uint64(p.buf[i-3]) << 40
- x |= uint64(p.buf[i-2]) << 48
- x |= uint64(p.buf[i-1]) << 56
- return
-}
-
-// DecodeFixed32 reads a 32-bit integer from the Buffer.
-// This is the format for the
-// fixed32, sfixed32, and float protocol buffer types.
-func (p *Buffer) DecodeFixed32() (x uint64, err error) {
- // x, err already 0
- i := p.index + 4
- if i < 0 || i > len(p.buf) {
- err = io.ErrUnexpectedEOF
- return
- }
- p.index = i
-
- x = uint64(p.buf[i-4])
- x |= uint64(p.buf[i-3]) << 8
- x |= uint64(p.buf[i-2]) << 16
- x |= uint64(p.buf[i-1]) << 24
- return
-}
-
-// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
-// from the Buffer.
-// This is the format used for the sint64 protocol buffer type.
-func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
- x, err = p.DecodeVarint()
- if err != nil {
- return
- }
- x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
- return
-}
-
-// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
-// from the Buffer.
-// This is the format used for the sint32 protocol buffer type.
-func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
- x, err = p.DecodeVarint()
- if err != nil {
- return
- }
- x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
- return
-}
-
-// These are not ValueDecoders: they produce an array of bytes or a string.
-// bytes, embedded messages
-
-// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
-// This is the format used for the bytes protocol buffer
-// type and for embedded messages.
-func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
- n, err := p.DecodeVarint()
- if err != nil {
- return nil, err
- }
-
- nb := int(n)
- if nb < 0 {
- return nil, fmt.Errorf("proto: bad byte length %d", nb)
- }
- end := p.index + nb
- if end < p.index || end > len(p.buf) {
- return nil, io.ErrUnexpectedEOF
- }
-
- if !alloc {
- // todo: check if can get more uses of alloc=false
- buf = p.buf[p.index:end]
- p.index += nb
- return
- }
-
- buf = make([]byte, nb)
- copy(buf, p.buf[p.index:])
- p.index += nb
- return
-}
-
-// DecodeStringBytes reads an encoded string from the Buffer.
-// This is the format used for the proto2 string type.
-func (p *Buffer) DecodeStringBytes() (s string, err error) {
- buf, err := p.DecodeRawBytes(false)
- if err != nil {
- return
- }
- return string(buf), nil
-}
-
-// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
-// If the protocol buffer has extensions, and the field matches, add it as an extension.
-// Otherwise, if the XXX_unrecognized field exists, append the skipped data there.
-func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error {
- oi := o.index
-
- err := o.skip(t, tag, wire)
- if err != nil {
- return err
- }
-
- if !unrecField.IsValid() {
- return nil
- }
-
- ptr := structPointer_Bytes(base, unrecField)
-
- // Add the skipped field to struct field
- obuf := o.buf
-
- o.buf = *ptr
- o.EncodeVarint(uint64(tag<<3 | wire))
- *ptr = append(o.buf, obuf[oi:o.index]...)
-
- o.buf = obuf
-
- return nil
-}
-
-// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
-func (o *Buffer) skip(t reflect.Type, tag, wire int) error {
-
- var u uint64
- var err error
-
- switch wire {
- case WireVarint:
- _, err = o.DecodeVarint()
- case WireFixed64:
- _, err = o.DecodeFixed64()
- case WireBytes:
- _, err = o.DecodeRawBytes(false)
- case WireFixed32:
- _, err = o.DecodeFixed32()
- case WireStartGroup:
- for {
- u, err = o.DecodeVarint()
- if err != nil {
- break
- }
- fwire := int(u & 0x7)
- if fwire == WireEndGroup {
- break
- }
- ftag := int(u >> 3)
- err = o.skip(t, ftag, fwire)
- if err != nil {
- break
- }
- }
- default:
- err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t)
- }
- return err
-}
-
-// Unmarshaler is the interface representing objects that can
-// unmarshal themselves. The method should reset the receiver before
-// decoding starts. The argument points to data that may be
-// overwritten, so implementations should not keep references to the
-// buffer.
-type Unmarshaler interface {
- Unmarshal([]byte) error
-}
-
-// Unmarshal parses the protocol buffer representation in buf and places the
-// decoded result in pb. If the struct underlying pb does not match
-// the data in buf, the results can be unpredictable.
-//
-// Unmarshal resets pb before starting to unmarshal, so any
-// existing data in pb is always removed. Use UnmarshalMerge
-// to preserve and append to existing data.
-func Unmarshal(buf []byte, pb Message) error {
- pb.Reset()
- return UnmarshalMerge(buf, pb)
-}
-
-// UnmarshalMerge parses the protocol buffer representation in buf and
-// writes the decoded result to pb. If the struct underlying pb does not match
-// the data in buf, the results can be unpredictable.
-//
-// UnmarshalMerge merges into existing data in pb.
-// Most code should use Unmarshal instead.
-func UnmarshalMerge(buf []byte, pb Message) error {
- // If the object can unmarshal itself, let it.
- if u, ok := pb.(Unmarshaler); ok {
- return u.Unmarshal(buf)
- }
- return NewBuffer(buf).Unmarshal(pb)
-}
-
-// DecodeMessage reads a count-delimited message from the Buffer.
-func (p *Buffer) DecodeMessage(pb Message) error {
- enc, err := p.DecodeRawBytes(false)
- if err != nil {
- return err
- }
- return NewBuffer(enc).Unmarshal(pb)
-}
-
-// DecodeGroup reads a tag-delimited group from the Buffer.
-func (p *Buffer) DecodeGroup(pb Message) error {
- typ, base, err := getbase(pb)
- if err != nil {
- return err
- }
- return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base)
-}
-
-// Unmarshal parses the protocol buffer representation in the
-// Buffer and places the decoded result in pb. If the struct
-// underlying pb does not match the data in the buffer, the results can be
-// unpredictable.
-//
-// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
-func (p *Buffer) Unmarshal(pb Message) error {
- // If the object can unmarshal itself, let it.
- if u, ok := pb.(Unmarshaler); ok {
- err := u.Unmarshal(p.buf[p.index:])
- p.index = len(p.buf)
- return err
- }
-
- typ, base, err := getbase(pb)
- if err != nil {
- return err
- }
-
- err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base)
-
- if collectStats {
- stats.Decode++
- }
-
- return err
-}
-
-// unmarshalType does the work of unmarshaling a structure.
-func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error {
- var state errorState
- required, reqFields := prop.reqCount, uint64(0)
-
- var err error
- for err == nil && o.index < len(o.buf) {
- oi := o.index
- var u uint64
- u, err = o.DecodeVarint()
- if err != nil {
- break
- }
- wire := int(u & 0x7)
- if wire == WireEndGroup {
- if is_group {
- if required > 0 {
- // Not enough information to determine the exact field.
- // (See below.)
- return &RequiredNotSetError{"{Unknown}"}
- }
- return nil // input is satisfied
- }
- return fmt.Errorf("proto: %s: wiretype end group for non-group", st)
- }
- tag := int(u >> 3)
- if tag <= 0 {
- return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire)
- }
- fieldnum, ok := prop.decoderTags.get(tag)
- if !ok {
- // Maybe it's an extension?
- if prop.extendable {
- if e, _ := extendable(structPointer_Interface(base, st)); isExtensionField(e, int32(tag)) {
- if err = o.skip(st, tag, wire); err == nil {
- extmap := e.extensionsWrite()
- ext := extmap[int32(tag)] // may be missing
- ext.enc = append(ext.enc, o.buf[oi:o.index]...)
- extmap[int32(tag)] = ext
- }
- continue
- }
- }
- // Maybe it's a oneof?
- if prop.oneofUnmarshaler != nil {
- m := structPointer_Interface(base, st).(Message)
- // First return value indicates whether tag is a oneof field.
- ok, err = prop.oneofUnmarshaler(m, tag, wire, o)
- if err == ErrInternalBadWireType {
- // Map the error to something more descriptive.
- // Do the formatting here to save generated code space.
- err = fmt.Errorf("bad wiretype for oneof field in %T", m)
- }
- if ok {
- continue
- }
- }
- err = o.skipAndSave(st, tag, wire, base, prop.unrecField)
- continue
- }
- p := prop.Prop[fieldnum]
-
- if p.dec == nil {
- fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name)
- continue
- }
- dec := p.dec
- if wire != WireStartGroup && wire != p.WireType {
- if wire == WireBytes && p.packedDec != nil {
- // a packable field
- dec = p.packedDec
- } else {
- err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType)
- continue
- }
- }
- decErr := dec(o, p, base)
- if decErr != nil && !state.shouldContinue(decErr, p) {
- err = decErr
- }
- if err == nil && p.Required {
- // Successfully decoded a required field.
- if tag <= 64 {
- // use bitmap for fields 1-64 to catch field reuse.
- var mask uint64 = 1 << uint64(tag-1)
- if reqFields&mask == 0 {
- // new required field
- reqFields |= mask
- required--
- }
- } else {
- // This is imprecise. It can be fooled by a required field
- // with a tag > 64 that is encoded twice; that's very rare.
- // A fully correct implementation would require allocating
- // a data structure, which we would like to avoid.
- required--
- }
- }
- }
- if err == nil {
- if is_group {
- return io.ErrUnexpectedEOF
- }
- if state.err != nil {
- return state.err
- }
- if required > 0 {
- // Not enough information to determine the exact field. If we use extra
- // CPU, we could determine the field only if the missing required field
- // has a tag <= 64 and we check reqFields.
- return &RequiredNotSetError{"{Unknown}"}
- }
- }
- return err
-}
-
-// Individual type decoders
-// For each,
-// u is the decoded value,
-// v is a pointer to the field (pointer) in the struct
-
-// Sizes of the pools to allocate inside the Buffer.
-// The goal is modest amortization and allocation
-// on at least 16-byte boundaries.
-const (
- boolPoolSize = 16
- uint32PoolSize = 8
- uint64PoolSize = 4
-)
-
-// Decode a bool.
-func (o *Buffer) dec_bool(p *Properties, base structPointer) error {
- u, err := p.valDec(o)
- if err != nil {
- return err
- }
- if len(o.bools) == 0 {
- o.bools = make([]bool, boolPoolSize)
- }
- o.bools[0] = u != 0
- *structPointer_Bool(base, p.field) = &o.bools[0]
- o.bools = o.bools[1:]
- return nil
-}
-
-func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error {
- u, err := p.valDec(o)
- if err != nil {
- return err
- }
- *structPointer_BoolVal(base, p.field) = u != 0
- return nil
-}
-
-// Decode an int32.
-func (o *Buffer) dec_int32(p *Properties, base structPointer) error {
- u, err := p.valDec(o)
- if err != nil {
- return err
- }
- word32_Set(structPointer_Word32(base, p.field), o, uint32(u))
- return nil
-}
-
-func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error {
- u, err := p.valDec(o)
- if err != nil {
- return err
- }
- word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u))
- return nil
-}
-
-// Decode an int64.
-func (o *Buffer) dec_int64(p *Properties, base structPointer) error {
- u, err := p.valDec(o)
- if err != nil {
- return err
- }
- word64_Set(structPointer_Word64(base, p.field), o, u)
- return nil
-}
-
-func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error {
- u, err := p.valDec(o)
- if err != nil {
- return err
- }
- word64Val_Set(structPointer_Word64Val(base, p.field), o, u)
- return nil
-}
-
-// Decode a string.
-func (o *Buffer) dec_string(p *Properties, base structPointer) error {
- s, err := o.DecodeStringBytes()
- if err != nil {
- return err
- }
- *structPointer_String(base, p.field) = &s
- return nil
-}
-
-func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error {
- s, err := o.DecodeStringBytes()
- if err != nil {
- return err
- }
- *structPointer_StringVal(base, p.field) = s
- return nil
-}
-
-// Decode a slice of bytes ([]byte).
-func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error {
- b, err := o.DecodeRawBytes(true)
- if err != nil {
- return err
- }
- *structPointer_Bytes(base, p.field) = b
- return nil
-}
-
-// Decode a slice of bools ([]bool).
-func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error {
- u, err := p.valDec(o)
- if err != nil {
- return err
- }
- v := structPointer_BoolSlice(base, p.field)
- *v = append(*v, u != 0)
- return nil
-}
-
-// Decode a slice of bools ([]bool) in packed format.
-func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error {
- v := structPointer_BoolSlice(base, p.field)
-
- nn, err := o.DecodeVarint()
- if err != nil {
- return err
- }
- nb := int(nn) // number of bytes of encoded bools
- fin := o.index + nb
- if fin < o.index {
- return errOverflow
- }
-
- y := *v
- for o.index < fin {
- u, err := p.valDec(o)
- if err != nil {
- return err
- }
- y = append(y, u != 0)
- }
-
- *v = y
- return nil
-}
-
-// Decode a slice of int32s ([]int32).
-func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error {
- u, err := p.valDec(o)
- if err != nil {
- return err
- }
- structPointer_Word32Slice(base, p.field).Append(uint32(u))
- return nil
-}
-
-// Decode a slice of int32s ([]int32) in packed format.
-func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error {
- v := structPointer_Word32Slice(base, p.field)
-
- nn, err := o.DecodeVarint()
- if err != nil {
- return err
- }
- nb := int(nn) // number of bytes of encoded int32s
-
- fin := o.index + nb
- if fin < o.index {
- return errOverflow
- }
- for o.index < fin {
- u, err := p.valDec(o)
- if err != nil {
- return err
- }
- v.Append(uint32(u))
- }
- return nil
-}
-
-// Decode a slice of int64s ([]int64).
-func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error {
- u, err := p.valDec(o)
- if err != nil {
- return err
- }
-
- structPointer_Word64Slice(base, p.field).Append(u)
- return nil
-}
-
-// Decode a slice of int64s ([]int64) in packed format.
-func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error {
- v := structPointer_Word64Slice(base, p.field)
-
- nn, err := o.DecodeVarint()
- if err != nil {
- return err
- }
- nb := int(nn) // number of bytes of encoded int64s
-
- fin := o.index + nb
- if fin < o.index {
- return errOverflow
- }
- for o.index < fin {
- u, err := p.valDec(o)
- if err != nil {
- return err
- }
- v.Append(u)
- }
- return nil
-}
-
-// Decode a slice of strings ([]string).
-func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error {
- s, err := o.DecodeStringBytes()
- if err != nil {
- return err
- }
- v := structPointer_StringSlice(base, p.field)
- *v = append(*v, s)
- return nil
-}
-
-// Decode a slice of slice of bytes ([][]byte).
-func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error {
- b, err := o.DecodeRawBytes(true)
- if err != nil {
- return err
- }
- v := structPointer_BytesSlice(base, p.field)
- *v = append(*v, b)
- return nil
-}
-
-// Decode a map field.
-func (o *Buffer) dec_new_map(p *Properties, base structPointer) error {
- raw, err := o.DecodeRawBytes(false)
- if err != nil {
- return err
- }
- oi := o.index // index at the end of this map entry
- o.index -= len(raw) // move buffer back to start of map entry
-
- mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V
- if mptr.Elem().IsNil() {
- mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem()))
- }
- v := mptr.Elem() // map[K]V
-
- // Prepare addressable doubly-indirect placeholders for the key and value types.
- // See enc_new_map for why.
- keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K
- keybase := toStructPointer(keyptr.Addr()) // **K
-
- var valbase structPointer
- var valptr reflect.Value
- switch p.mtype.Elem().Kind() {
- case reflect.Slice:
- // []byte
- var dummy []byte
- valptr = reflect.ValueOf(&dummy) // *[]byte
- valbase = toStructPointer(valptr) // *[]byte
- case reflect.Ptr:
- // message; valptr is **Msg; need to allocate the intermediate pointer
- valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
- valptr.Set(reflect.New(valptr.Type().Elem()))
- valbase = toStructPointer(valptr)
- default:
- // everything else
- valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
- valbase = toStructPointer(valptr.Addr()) // **V
- }
-
- // Decode.
- // This parses a restricted wire format, namely the encoding of a message
- // with two fields. See enc_new_map for the format.
- for o.index < oi {
- // tagcode for key and value properties are always a single byte
- // because they have tags 1 and 2.
- tagcode := o.buf[o.index]
- o.index++
- switch tagcode {
- case p.mkeyprop.tagcode[0]:
- if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil {
- return err
- }
- case p.mvalprop.tagcode[0]:
- if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil {
- return err
- }
- default:
- // TODO: Should we silently skip this instead?
- return fmt.Errorf("proto: bad map data tag %d", raw[0])
- }
- }
- keyelem, valelem := keyptr.Elem(), valptr.Elem()
- if !keyelem.IsValid() {
- keyelem = reflect.Zero(p.mtype.Key())
- }
- if !valelem.IsValid() {
- valelem = reflect.Zero(p.mtype.Elem())
- }
-
- v.SetMapIndex(keyelem, valelem)
- return nil
-}
-
-// Decode a group.
-func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error {
- bas := structPointer_GetStructPointer(base, p.field)
- if structPointer_IsNil(bas) {
- // allocate new nested message
- bas = toStructPointer(reflect.New(p.stype))
- structPointer_SetStructPointer(base, p.field, bas)
- }
- return o.unmarshalType(p.stype, p.sprop, true, bas)
-}
-
-// Decode an embedded message.
-func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) {
- raw, e := o.DecodeRawBytes(false)
- if e != nil {
- return e
- }
-
- bas := structPointer_GetStructPointer(base, p.field)
- if structPointer_IsNil(bas) {
- // allocate new nested message
- bas = toStructPointer(reflect.New(p.stype))
- structPointer_SetStructPointer(base, p.field, bas)
- }
-
- // If the object can unmarshal itself, let it.
- if p.isUnmarshaler {
- iv := structPointer_Interface(bas, p.stype)
- return iv.(Unmarshaler).Unmarshal(raw)
- }
-
- obuf := o.buf
- oi := o.index
- o.buf = raw
- o.index = 0
-
- err = o.unmarshalType(p.stype, p.sprop, false, bas)
- o.buf = obuf
- o.index = oi
-
- return err
-}
-
-// Decode a slice of embedded messages.
-func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error {
- return o.dec_slice_struct(p, false, base)
-}
-
-// Decode a slice of embedded groups.
-func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error {
- return o.dec_slice_struct(p, true, base)
-}
-
-// Decode a slice of structs ([]*struct).
-func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error {
- v := reflect.New(p.stype)
- bas := toStructPointer(v)
- structPointer_StructPointerSlice(base, p.field).Append(bas)
-
- if is_group {
- err := o.unmarshalType(p.stype, p.sprop, is_group, bas)
- return err
- }
-
- raw, err := o.DecodeRawBytes(false)
- if err != nil {
- return err
- }
-
- // If the object can unmarshal itself, let it.
- if p.isUnmarshaler {
- iv := v.Interface()
- return iv.(Unmarshaler).Unmarshal(raw)
- }
-
- obuf := o.buf
- oi := o.index
- o.buf = raw
- o.index = 0
-
- err = o.unmarshalType(p.stype, p.sprop, is_group, bas)
-
- o.buf = obuf
- o.index = oi
-
- return err
-}
diff --git a/vendor/src/github.com/golang/protobuf/proto/decode_test.go b/vendor/src/github.com/golang/protobuf/proto/decode_test.go
deleted file mode 100644
index b1f1304..0000000
--- a/vendor/src/github.com/golang/protobuf/proto/decode_test.go
+++ /dev/null
@@ -1,256 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto_test
-
-import (
- "fmt"
- "testing"
-
- "github.com/golang/protobuf/proto"
- tpb "github.com/golang/protobuf/proto/proto3_proto"
-)
-
-var (
- bytesBlackhole []byte
- msgBlackhole = new(tpb.Message)
-)
-
-// BenchmarkVarint32ArraySmall shows the performance on an array of small int32 fields (1 and
-// 2 bytes long).
-func BenchmarkVarint32ArraySmall(b *testing.B) {
- for i := uint(1); i <= 10; i++ {
- dist := genInt32Dist([7]int{0, 3, 1}, 1<2GB.
- ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
-)
-
-// The fundamental encoders that put bytes on the wire.
-// Those that take integer types all accept uint64 and are
-// therefore of type valueEncoder.
-
-const maxVarintBytes = 10 // maximum length of a varint
-
-// maxMarshalSize is the largest allowed size of an encoded protobuf,
-// since C++ and Java use signed int32s for the size.
-const maxMarshalSize = 1<<31 - 1
-
-// EncodeVarint returns the varint encoding of x.
-// This is the format for the
-// int32, int64, uint32, uint64, bool, and enum
-// protocol buffer types.
-// Not used by the package itself, but helpful to clients
-// wishing to use the same encoding.
-func EncodeVarint(x uint64) []byte {
- var buf [maxVarintBytes]byte
- var n int
- for n = 0; x > 127; n++ {
- buf[n] = 0x80 | uint8(x&0x7F)
- x >>= 7
- }
- buf[n] = uint8(x)
- n++
- return buf[0:n]
-}
-
-// EncodeVarint writes a varint-encoded integer to the Buffer.
-// This is the format for the
-// int32, int64, uint32, uint64, bool, and enum
-// protocol buffer types.
-func (p *Buffer) EncodeVarint(x uint64) error {
- for x >= 1<<7 {
- p.buf = append(p.buf, uint8(x&0x7f|0x80))
- x >>= 7
- }
- p.buf = append(p.buf, uint8(x))
- return nil
-}
-
-// SizeVarint returns the varint encoding size of an integer.
-func SizeVarint(x uint64) int {
- return sizeVarint(x)
-}
-
-func sizeVarint(x uint64) (n int) {
- for {
- n++
- x >>= 7
- if x == 0 {
- break
- }
- }
- return n
-}
-
-// EncodeFixed64 writes a 64-bit integer to the Buffer.
-// This is the format for the
-// fixed64, sfixed64, and double protocol buffer types.
-func (p *Buffer) EncodeFixed64(x uint64) error {
- p.buf = append(p.buf,
- uint8(x),
- uint8(x>>8),
- uint8(x>>16),
- uint8(x>>24),
- uint8(x>>32),
- uint8(x>>40),
- uint8(x>>48),
- uint8(x>>56))
- return nil
-}
-
-func sizeFixed64(x uint64) int {
- return 8
-}
-
-// EncodeFixed32 writes a 32-bit integer to the Buffer.
-// This is the format for the
-// fixed32, sfixed32, and float protocol buffer types.
-func (p *Buffer) EncodeFixed32(x uint64) error {
- p.buf = append(p.buf,
- uint8(x),
- uint8(x>>8),
- uint8(x>>16),
- uint8(x>>24))
- return nil
-}
-
-func sizeFixed32(x uint64) int {
- return 4
-}
-
-// EncodeZigzag64 writes a zigzag-encoded 64-bit integer
-// to the Buffer.
-// This is the format used for the sint64 protocol buffer type.
-func (p *Buffer) EncodeZigzag64(x uint64) error {
- // use signed number to get arithmetic right shift.
- return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-
-func sizeZigzag64(x uint64) int {
- return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-
-// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
-// to the Buffer.
-// This is the format used for the sint32 protocol buffer type.
-func (p *Buffer) EncodeZigzag32(x uint64) error {
- // use signed number to get arithmetic right shift.
- return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
-}
-
-func sizeZigzag32(x uint64) int {
- return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
-}
-
-// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
-// This is the format used for the bytes protocol buffer
-// type and for embedded messages.
-func (p *Buffer) EncodeRawBytes(b []byte) error {
- p.EncodeVarint(uint64(len(b)))
- p.buf = append(p.buf, b...)
- return nil
-}
-
-func sizeRawBytes(b []byte) int {
- return sizeVarint(uint64(len(b))) +
- len(b)
-}
-
-// EncodeStringBytes writes an encoded string to the Buffer.
-// This is the format used for the proto2 string type.
-func (p *Buffer) EncodeStringBytes(s string) error {
- p.EncodeVarint(uint64(len(s)))
- p.buf = append(p.buf, s...)
- return nil
-}
-
-func sizeStringBytes(s string) int {
- return sizeVarint(uint64(len(s))) +
- len(s)
-}
-
-// Marshaler is the interface representing objects that can marshal themselves.
-type Marshaler interface {
- Marshal() ([]byte, error)
-}
-
-// Marshal takes the protocol buffer
-// and encodes it into the wire format, returning the data.
-func Marshal(pb Message) ([]byte, error) {
- // Can the object marshal itself?
- if m, ok := pb.(Marshaler); ok {
- return m.Marshal()
- }
- p := NewBuffer(nil)
- err := p.Marshal(pb)
- if p.buf == nil && err == nil {
- // Return a non-nil slice on success.
- return []byte{}, nil
- }
- return p.buf, err
-}
-
-// EncodeMessage writes the protocol buffer to the Buffer,
-// prefixed by a varint-encoded length.
-func (p *Buffer) EncodeMessage(pb Message) error {
- t, base, err := getbase(pb)
- if structPointer_IsNil(base) {
- return ErrNil
- }
- if err == nil {
- var state errorState
- err = p.enc_len_struct(GetProperties(t.Elem()), base, &state)
- }
- return err
-}
-
-// Marshal takes the protocol buffer
-// and encodes it into the wire format, writing the result to the
-// Buffer.
-func (p *Buffer) Marshal(pb Message) error {
- // Can the object marshal itself?
- if m, ok := pb.(Marshaler); ok {
- data, err := m.Marshal()
- p.buf = append(p.buf, data...)
- return err
- }
-
- t, base, err := getbase(pb)
- if structPointer_IsNil(base) {
- return ErrNil
- }
- if err == nil {
- err = p.enc_struct(GetProperties(t.Elem()), base)
- }
-
- if collectStats {
- (stats).Encode++ // Parens are to work around a goimports bug.
- }
-
- if len(p.buf) > maxMarshalSize {
- return ErrTooLarge
- }
- return err
-}
-
-// Size returns the encoded size of a protocol buffer.
-func Size(pb Message) (n int) {
- // Can the object marshal itself? If so, Size is slow.
- // TODO: add Size to Marshaler, or add a Sizer interface.
- if m, ok := pb.(Marshaler); ok {
- b, _ := m.Marshal()
- return len(b)
- }
-
- t, base, err := getbase(pb)
- if structPointer_IsNil(base) {
- return 0
- }
- if err == nil {
- n = size_struct(GetProperties(t.Elem()), base)
- }
-
- if collectStats {
- (stats).Size++ // Parens are to work around a goimports bug.
- }
-
- return
-}
-
-// Individual type encoders.
-
-// Encode a bool.
-func (o *Buffer) enc_bool(p *Properties, base structPointer) error {
- v := *structPointer_Bool(base, p.field)
- if v == nil {
- return ErrNil
- }
- x := 0
- if *v {
- x = 1
- }
- o.buf = append(o.buf, p.tagcode...)
- p.valEnc(o, uint64(x))
- return nil
-}
-
-func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error {
- v := *structPointer_BoolVal(base, p.field)
- if !v {
- return ErrNil
- }
- o.buf = append(o.buf, p.tagcode...)
- p.valEnc(o, 1)
- return nil
-}
-
-func size_bool(p *Properties, base structPointer) int {
- v := *structPointer_Bool(base, p.field)
- if v == nil {
- return 0
- }
- return len(p.tagcode) + 1 // each bool takes exactly one byte
-}
-
-func size_proto3_bool(p *Properties, base structPointer) int {
- v := *structPointer_BoolVal(base, p.field)
- if !v && !p.oneof {
- return 0
- }
- return len(p.tagcode) + 1 // each bool takes exactly one byte
-}
-
-// Encode an int32.
-func (o *Buffer) enc_int32(p *Properties, base structPointer) error {
- v := structPointer_Word32(base, p.field)
- if word32_IsNil(v) {
- return ErrNil
- }
- x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range
- o.buf = append(o.buf, p.tagcode...)
- p.valEnc(o, uint64(x))
- return nil
-}
-
-func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error {
- v := structPointer_Word32Val(base, p.field)
- x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range
- if x == 0 {
- return ErrNil
- }
- o.buf = append(o.buf, p.tagcode...)
- p.valEnc(o, uint64(x))
- return nil
-}
-
-func size_int32(p *Properties, base structPointer) (n int) {
- v := structPointer_Word32(base, p.field)
- if word32_IsNil(v) {
- return 0
- }
- x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range
- n += len(p.tagcode)
- n += p.valSize(uint64(x))
- return
-}
-
-func size_proto3_int32(p *Properties, base structPointer) (n int) {
- v := structPointer_Word32Val(base, p.field)
- x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range
- if x == 0 && !p.oneof {
- return 0
- }
- n += len(p.tagcode)
- n += p.valSize(uint64(x))
- return
-}
-
-// Encode a uint32.
-// Exactly the same as int32, except for no sign extension.
-func (o *Buffer) enc_uint32(p *Properties, base structPointer) error {
- v := structPointer_Word32(base, p.field)
- if word32_IsNil(v) {
- return ErrNil
- }
- x := word32_Get(v)
- o.buf = append(o.buf, p.tagcode...)
- p.valEnc(o, uint64(x))
- return nil
-}
-
-func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error {
- v := structPointer_Word32Val(base, p.field)
- x := word32Val_Get(v)
- if x == 0 {
- return ErrNil
- }
- o.buf = append(o.buf, p.tagcode...)
- p.valEnc(o, uint64(x))
- return nil
-}
-
-func size_uint32(p *Properties, base structPointer) (n int) {
- v := structPointer_Word32(base, p.field)
- if word32_IsNil(v) {
- return 0
- }
- x := word32_Get(v)
- n += len(p.tagcode)
- n += p.valSize(uint64(x))
- return
-}
-
-func size_proto3_uint32(p *Properties, base structPointer) (n int) {
- v := structPointer_Word32Val(base, p.field)
- x := word32Val_Get(v)
- if x == 0 && !p.oneof {
- return 0
- }
- n += len(p.tagcode)
- n += p.valSize(uint64(x))
- return
-}
-
-// Encode an int64.
-func (o *Buffer) enc_int64(p *Properties, base structPointer) error {
- v := structPointer_Word64(base, p.field)
- if word64_IsNil(v) {
- return ErrNil
- }
- x := word64_Get(v)
- o.buf = append(o.buf, p.tagcode...)
- p.valEnc(o, x)
- return nil
-}
-
-func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error {
- v := structPointer_Word64Val(base, p.field)
- x := word64Val_Get(v)
- if x == 0 {
- return ErrNil
- }
- o.buf = append(o.buf, p.tagcode...)
- p.valEnc(o, x)
- return nil
-}
-
-func size_int64(p *Properties, base structPointer) (n int) {
- v := structPointer_Word64(base, p.field)
- if word64_IsNil(v) {
- return 0
- }
- x := word64_Get(v)
- n += len(p.tagcode)
- n += p.valSize(x)
- return
-}
-
-func size_proto3_int64(p *Properties, base structPointer) (n int) {
- v := structPointer_Word64Val(base, p.field)
- x := word64Val_Get(v)
- if x == 0 && !p.oneof {
- return 0
- }
- n += len(p.tagcode)
- n += p.valSize(x)
- return
-}
-
-// Encode a string.
-func (o *Buffer) enc_string(p *Properties, base structPointer) error {
- v := *structPointer_String(base, p.field)
- if v == nil {
- return ErrNil
- }
- x := *v
- o.buf = append(o.buf, p.tagcode...)
- o.EncodeStringBytes(x)
- return nil
-}
-
-func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error {
- v := *structPointer_StringVal(base, p.field)
- if v == "" {
- return ErrNil
- }
- o.buf = append(o.buf, p.tagcode...)
- o.EncodeStringBytes(v)
- return nil
-}
-
-func size_string(p *Properties, base structPointer) (n int) {
- v := *structPointer_String(base, p.field)
- if v == nil {
- return 0
- }
- x := *v
- n += len(p.tagcode)
- n += sizeStringBytes(x)
- return
-}
-
-func size_proto3_string(p *Properties, base structPointer) (n int) {
- v := *structPointer_StringVal(base, p.field)
- if v == "" && !p.oneof {
- return 0
- }
- n += len(p.tagcode)
- n += sizeStringBytes(v)
- return
-}
-
-// All protocol buffer fields are nillable, but be careful.
-func isNil(v reflect.Value) bool {
- switch v.Kind() {
- case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
- return v.IsNil()
- }
- return false
-}
-
-// Encode a message struct.
-func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error {
- var state errorState
- structp := structPointer_GetStructPointer(base, p.field)
- if structPointer_IsNil(structp) {
- return ErrNil
- }
-
- // Can the object marshal itself?
- if p.isMarshaler {
- m := structPointer_Interface(structp, p.stype).(Marshaler)
- data, err := m.Marshal()
- if err != nil && !state.shouldContinue(err, nil) {
- return err
- }
- o.buf = append(o.buf, p.tagcode...)
- o.EncodeRawBytes(data)
- return state.err
- }
-
- o.buf = append(o.buf, p.tagcode...)
- return o.enc_len_struct(p.sprop, structp, &state)
-}
-
-func size_struct_message(p *Properties, base structPointer) int {
- structp := structPointer_GetStructPointer(base, p.field)
- if structPointer_IsNil(structp) {
- return 0
- }
-
- // Can the object marshal itself?
- if p.isMarshaler {
- m := structPointer_Interface(structp, p.stype).(Marshaler)
- data, _ := m.Marshal()
- n0 := len(p.tagcode)
- n1 := sizeRawBytes(data)
- return n0 + n1
- }
-
- n0 := len(p.tagcode)
- n1 := size_struct(p.sprop, structp)
- n2 := sizeVarint(uint64(n1)) // size of encoded length
- return n0 + n1 + n2
-}
-
-// Encode a group struct.
-func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error {
- var state errorState
- b := structPointer_GetStructPointer(base, p.field)
- if structPointer_IsNil(b) {
- return ErrNil
- }
-
- o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))
- err := o.enc_struct(p.sprop, b)
- if err != nil && !state.shouldContinue(err, nil) {
- return err
- }
- o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup))
- return state.err
-}
-
-func size_struct_group(p *Properties, base structPointer) (n int) {
- b := structPointer_GetStructPointer(base, p.field)
- if structPointer_IsNil(b) {
- return 0
- }
-
- n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup))
- n += size_struct(p.sprop, b)
- n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup))
- return
-}
-
-// Encode a slice of bools ([]bool).
-func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error {
- s := *structPointer_BoolSlice(base, p.field)
- l := len(s)
- if l == 0 {
- return ErrNil
- }
- for _, x := range s {
- o.buf = append(o.buf, p.tagcode...)
- v := uint64(0)
- if x {
- v = 1
- }
- p.valEnc(o, v)
- }
- return nil
-}
-
-func size_slice_bool(p *Properties, base structPointer) int {
- s := *structPointer_BoolSlice(base, p.field)
- l := len(s)
- if l == 0 {
- return 0
- }
- return l * (len(p.tagcode) + 1) // each bool takes exactly one byte
-}
-
-// Encode a slice of bools ([]bool) in packed format.
-func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error {
- s := *structPointer_BoolSlice(base, p.field)
- l := len(s)
- if l == 0 {
- return ErrNil
- }
- o.buf = append(o.buf, p.tagcode...)
- o.EncodeVarint(uint64(l)) // each bool takes exactly one byte
- for _, x := range s {
- v := uint64(0)
- if x {
- v = 1
- }
- p.valEnc(o, v)
- }
- return nil
-}
-
-func size_slice_packed_bool(p *Properties, base structPointer) (n int) {
- s := *structPointer_BoolSlice(base, p.field)
- l := len(s)
- if l == 0 {
- return 0
- }
- n += len(p.tagcode)
- n += sizeVarint(uint64(l))
- n += l // each bool takes exactly one byte
- return
-}
-
-// Encode a slice of bytes ([]byte).
-func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error {
- s := *structPointer_Bytes(base, p.field)
- if s == nil {
- return ErrNil
- }
- o.buf = append(o.buf, p.tagcode...)
- o.EncodeRawBytes(s)
- return nil
-}
-
-func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error {
- s := *structPointer_Bytes(base, p.field)
- if len(s) == 0 {
- return ErrNil
- }
- o.buf = append(o.buf, p.tagcode...)
- o.EncodeRawBytes(s)
- return nil
-}
-
-func size_slice_byte(p *Properties, base structPointer) (n int) {
- s := *structPointer_Bytes(base, p.field)
- if s == nil && !p.oneof {
- return 0
- }
- n += len(p.tagcode)
- n += sizeRawBytes(s)
- return
-}
-
-func size_proto3_slice_byte(p *Properties, base structPointer) (n int) {
- s := *structPointer_Bytes(base, p.field)
- if len(s) == 0 && !p.oneof {
- return 0
- }
- n += len(p.tagcode)
- n += sizeRawBytes(s)
- return
-}
-
-// Encode a slice of int32s ([]int32).
-func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error {
- s := structPointer_Word32Slice(base, p.field)
- l := s.Len()
- if l == 0 {
- return ErrNil
- }
- for i := 0; i < l; i++ {
- o.buf = append(o.buf, p.tagcode...)
- x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
- p.valEnc(o, uint64(x))
- }
- return nil
-}
-
-func size_slice_int32(p *Properties, base structPointer) (n int) {
- s := structPointer_Word32Slice(base, p.field)
- l := s.Len()
- if l == 0 {
- return 0
- }
- for i := 0; i < l; i++ {
- n += len(p.tagcode)
- x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
- n += p.valSize(uint64(x))
- }
- return
-}
-
-// Encode a slice of int32s ([]int32) in packed format.
-func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error {
- s := structPointer_Word32Slice(base, p.field)
- l := s.Len()
- if l == 0 {
- return ErrNil
- }
- // TODO: Reuse a Buffer.
- buf := NewBuffer(nil)
- for i := 0; i < l; i++ {
- x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
- p.valEnc(buf, uint64(x))
- }
-
- o.buf = append(o.buf, p.tagcode...)
- o.EncodeVarint(uint64(len(buf.buf)))
- o.buf = append(o.buf, buf.buf...)
- return nil
-}
-
-func size_slice_packed_int32(p *Properties, base structPointer) (n int) {
- s := structPointer_Word32Slice(base, p.field)
- l := s.Len()
- if l == 0 {
- return 0
- }
- var bufSize int
- for i := 0; i < l; i++ {
- x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
- bufSize += p.valSize(uint64(x))
- }
-
- n += len(p.tagcode)
- n += sizeVarint(uint64(bufSize))
- n += bufSize
- return
-}
-
-// Encode a slice of uint32s ([]uint32).
-// Exactly the same as int32, except for no sign extension.
-func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error {
- s := structPointer_Word32Slice(base, p.field)
- l := s.Len()
- if l == 0 {
- return ErrNil
- }
- for i := 0; i < l; i++ {
- o.buf = append(o.buf, p.tagcode...)
- x := s.Index(i)
- p.valEnc(o, uint64(x))
- }
- return nil
-}
-
-func size_slice_uint32(p *Properties, base structPointer) (n int) {
- s := structPointer_Word32Slice(base, p.field)
- l := s.Len()
- if l == 0 {
- return 0
- }
- for i := 0; i < l; i++ {
- n += len(p.tagcode)
- x := s.Index(i)
- n += p.valSize(uint64(x))
- }
- return
-}
-
-// Encode a slice of uint32s ([]uint32) in packed format.
-// Exactly the same as int32, except for no sign extension.
-func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error {
- s := structPointer_Word32Slice(base, p.field)
- l := s.Len()
- if l == 0 {
- return ErrNil
- }
- // TODO: Reuse a Buffer.
- buf := NewBuffer(nil)
- for i := 0; i < l; i++ {
- p.valEnc(buf, uint64(s.Index(i)))
- }
-
- o.buf = append(o.buf, p.tagcode...)
- o.EncodeVarint(uint64(len(buf.buf)))
- o.buf = append(o.buf, buf.buf...)
- return nil
-}
-
-func size_slice_packed_uint32(p *Properties, base structPointer) (n int) {
- s := structPointer_Word32Slice(base, p.field)
- l := s.Len()
- if l == 0 {
- return 0
- }
- var bufSize int
- for i := 0; i < l; i++ {
- bufSize += p.valSize(uint64(s.Index(i)))
- }
-
- n += len(p.tagcode)
- n += sizeVarint(uint64(bufSize))
- n += bufSize
- return
-}
-
-// Encode a slice of int64s ([]int64).
-func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error {
- s := structPointer_Word64Slice(base, p.field)
- l := s.Len()
- if l == 0 {
- return ErrNil
- }
- for i := 0; i < l; i++ {
- o.buf = append(o.buf, p.tagcode...)
- p.valEnc(o, s.Index(i))
- }
- return nil
-}
-
-func size_slice_int64(p *Properties, base structPointer) (n int) {
- s := structPointer_Word64Slice(base, p.field)
- l := s.Len()
- if l == 0 {
- return 0
- }
- for i := 0; i < l; i++ {
- n += len(p.tagcode)
- n += p.valSize(s.Index(i))
- }
- return
-}
-
-// Encode a slice of int64s ([]int64) in packed format.
-func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error {
- s := structPointer_Word64Slice(base, p.field)
- l := s.Len()
- if l == 0 {
- return ErrNil
- }
- // TODO: Reuse a Buffer.
- buf := NewBuffer(nil)
- for i := 0; i < l; i++ {
- p.valEnc(buf, s.Index(i))
- }
-
- o.buf = append(o.buf, p.tagcode...)
- o.EncodeVarint(uint64(len(buf.buf)))
- o.buf = append(o.buf, buf.buf...)
- return nil
-}
-
-func size_slice_packed_int64(p *Properties, base structPointer) (n int) {
- s := structPointer_Word64Slice(base, p.field)
- l := s.Len()
- if l == 0 {
- return 0
- }
- var bufSize int
- for i := 0; i < l; i++ {
- bufSize += p.valSize(s.Index(i))
- }
-
- n += len(p.tagcode)
- n += sizeVarint(uint64(bufSize))
- n += bufSize
- return
-}
-
-// Encode a slice of slice of bytes ([][]byte).
-func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error {
- ss := *structPointer_BytesSlice(base, p.field)
- l := len(ss)
- if l == 0 {
- return ErrNil
- }
- for i := 0; i < l; i++ {
- o.buf = append(o.buf, p.tagcode...)
- o.EncodeRawBytes(ss[i])
- }
- return nil
-}
-
-func size_slice_slice_byte(p *Properties, base structPointer) (n int) {
- ss := *structPointer_BytesSlice(base, p.field)
- l := len(ss)
- if l == 0 {
- return 0
- }
- n += l * len(p.tagcode)
- for i := 0; i < l; i++ {
- n += sizeRawBytes(ss[i])
- }
- return
-}
-
-// Encode a slice of strings ([]string).
-func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error {
- ss := *structPointer_StringSlice(base, p.field)
- l := len(ss)
- for i := 0; i < l; i++ {
- o.buf = append(o.buf, p.tagcode...)
- o.EncodeStringBytes(ss[i])
- }
- return nil
-}
-
-func size_slice_string(p *Properties, base structPointer) (n int) {
- ss := *structPointer_StringSlice(base, p.field)
- l := len(ss)
- n += l * len(p.tagcode)
- for i := 0; i < l; i++ {
- n += sizeStringBytes(ss[i])
- }
- return
-}
-
-// Encode a slice of message structs ([]*struct).
-func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error {
- var state errorState
- s := structPointer_StructPointerSlice(base, p.field)
- l := s.Len()
-
- for i := 0; i < l; i++ {
- structp := s.Index(i)
- if structPointer_IsNil(structp) {
- return errRepeatedHasNil
- }
-
- // Can the object marshal itself?
- if p.isMarshaler {
- m := structPointer_Interface(structp, p.stype).(Marshaler)
- data, err := m.Marshal()
- if err != nil && !state.shouldContinue(err, nil) {
- return err
- }
- o.buf = append(o.buf, p.tagcode...)
- o.EncodeRawBytes(data)
- continue
- }
-
- o.buf = append(o.buf, p.tagcode...)
- err := o.enc_len_struct(p.sprop, structp, &state)
- if err != nil && !state.shouldContinue(err, nil) {
- if err == ErrNil {
- return errRepeatedHasNil
- }
- return err
- }
- }
- return state.err
-}
-
-func size_slice_struct_message(p *Properties, base structPointer) (n int) {
- s := structPointer_StructPointerSlice(base, p.field)
- l := s.Len()
- n += l * len(p.tagcode)
- for i := 0; i < l; i++ {
- structp := s.Index(i)
- if structPointer_IsNil(structp) {
- return // return the size up to this point
- }
-
- // Can the object marshal itself?
- if p.isMarshaler {
- m := structPointer_Interface(structp, p.stype).(Marshaler)
- data, _ := m.Marshal()
- n += sizeRawBytes(data)
- continue
- }
-
- n0 := size_struct(p.sprop, structp)
- n1 := sizeVarint(uint64(n0)) // size of encoded length
- n += n0 + n1
- }
- return
-}
-
-// Encode a slice of group structs ([]*struct).
-func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error {
- var state errorState
- s := structPointer_StructPointerSlice(base, p.field)
- l := s.Len()
-
- for i := 0; i < l; i++ {
- b := s.Index(i)
- if structPointer_IsNil(b) {
- return errRepeatedHasNil
- }
-
- o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))
-
- err := o.enc_struct(p.sprop, b)
-
- if err != nil && !state.shouldContinue(err, nil) {
- if err == ErrNil {
- return errRepeatedHasNil
- }
- return err
- }
-
- o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup))
- }
- return state.err
-}
-
-func size_slice_struct_group(p *Properties, base structPointer) (n int) {
- s := structPointer_StructPointerSlice(base, p.field)
- l := s.Len()
-
- n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup))
- n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup))
- for i := 0; i < l; i++ {
- b := s.Index(i)
- if structPointer_IsNil(b) {
- return // return size up to this point
- }
-
- n += size_struct(p.sprop, b)
- }
- return
-}
-
-// Encode an extension map.
-func (o *Buffer) enc_map(p *Properties, base structPointer) error {
- exts := structPointer_ExtMap(base, p.field)
- if err := encodeExtensionsMap(*exts); err != nil {
- return err
- }
-
- return o.enc_map_body(*exts)
-}
-
-func (o *Buffer) enc_exts(p *Properties, base structPointer) error {
- exts := structPointer_Extensions(base, p.field)
- if err := encodeExtensions(exts); err != nil {
- return err
- }
- v, _ := exts.extensionsRead()
-
- return o.enc_map_body(v)
-}
-
-func (o *Buffer) enc_map_body(v map[int32]Extension) error {
- // Fast-path for common cases: zero or one extensions.
- if len(v) <= 1 {
- for _, e := range v {
- o.buf = append(o.buf, e.enc...)
- }
- return nil
- }
-
- // Sort keys to provide a deterministic encoding.
- keys := make([]int, 0, len(v))
- for k := range v {
- keys = append(keys, int(k))
- }
- sort.Ints(keys)
-
- for _, k := range keys {
- o.buf = append(o.buf, v[int32(k)].enc...)
- }
- return nil
-}
-
-func size_map(p *Properties, base structPointer) int {
- v := structPointer_ExtMap(base, p.field)
- return extensionsMapSize(*v)
-}
-
-func size_exts(p *Properties, base structPointer) int {
- v := structPointer_Extensions(base, p.field)
- return extensionsSize(v)
-}
-
-// Encode a map field.
-func (o *Buffer) enc_new_map(p *Properties, base structPointer) error {
- var state errorState // XXX: or do we need to plumb this through?
-
- /*
- A map defined as
- map map_field = N;
- is encoded in the same way as
- message MapFieldEntry {
- key_type key = 1;
- value_type value = 2;
- }
- repeated MapFieldEntry map_field = N;
- */
-
- v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
- if v.Len() == 0 {
- return nil
- }
-
- keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)
-
- enc := func() error {
- if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil {
- return err
- }
- if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil && err != ErrNil {
- return err
- }
- return nil
- }
-
- // Don't sort map keys. It is not required by the spec, and C++ doesn't do it.
- for _, key := range v.MapKeys() {
- val := v.MapIndex(key)
-
- keycopy.Set(key)
- valcopy.Set(val)
-
- o.buf = append(o.buf, p.tagcode...)
- if err := o.enc_len_thing(enc, &state); err != nil {
- return err
- }
- }
- return nil
-}
-
-func size_new_map(p *Properties, base structPointer) int {
- v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
-
- keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)
-
- n := 0
- for _, key := range v.MapKeys() {
- val := v.MapIndex(key)
- keycopy.Set(key)
- valcopy.Set(val)
-
- // Tag codes for key and val are the responsibility of the sub-sizer.
- keysize := p.mkeyprop.size(p.mkeyprop, keybase)
- valsize := p.mvalprop.size(p.mvalprop, valbase)
- entry := keysize + valsize
- // Add on tag code and length of map entry itself.
- n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry
- }
- return n
-}
-
-// mapEncodeScratch returns a new reflect.Value matching the map's value type,
-// and a structPointer suitable for passing to an encoder or sizer.
-func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) {
- // Prepare addressable doubly-indirect placeholders for the key and value types.
- // This is needed because the element-type encoders expect **T, but the map iteration produces T.
-
- keycopy = reflect.New(mapType.Key()).Elem() // addressable K
- keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K
- keyptr.Set(keycopy.Addr()) //
- keybase = toStructPointer(keyptr.Addr()) // **K
-
- // Value types are more varied and require special handling.
- switch mapType.Elem().Kind() {
- case reflect.Slice:
- // []byte
- var dummy []byte
- valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte
- valbase = toStructPointer(valcopy.Addr())
- case reflect.Ptr:
- // message; the generated field type is map[K]*Msg (so V is *Msg),
- // so we only need one level of indirection.
- valcopy = reflect.New(mapType.Elem()).Elem() // addressable V
- valbase = toStructPointer(valcopy.Addr())
- default:
- // everything else
- valcopy = reflect.New(mapType.Elem()).Elem() // addressable V
- valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V
- valptr.Set(valcopy.Addr()) //
- valbase = toStructPointer(valptr.Addr()) // **V
- }
- return
-}
-
-// Encode a struct.
-func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error {
- var state errorState
- // Encode fields in tag order so that decoders may use optimizations
- // that depend on the ordering.
- // https://developers.google.com/protocol-buffers/docs/encoding#order
- for _, i := range prop.order {
- p := prop.Prop[i]
- if p.enc != nil {
- err := p.enc(o, p, base)
- if err != nil {
- if err == ErrNil {
- if p.Required && state.err == nil {
- state.err = &RequiredNotSetError{p.Name}
- }
- } else if err == errRepeatedHasNil {
- // Give more context to nil values in repeated fields.
- return errors.New("repeated field " + p.OrigName + " has nil element")
- } else if !state.shouldContinue(err, p) {
- return err
- }
- }
- if len(o.buf) > maxMarshalSize {
- return ErrTooLarge
- }
- }
- }
-
- // Do oneof fields.
- if prop.oneofMarshaler != nil {
- m := structPointer_Interface(base, prop.stype).(Message)
- if err := prop.oneofMarshaler(m, o); err == ErrNil {
- return errOneofHasNil
- } else if err != nil {
- return err
- }
- }
-
- // Add unrecognized fields at the end.
- if prop.unrecField.IsValid() {
- v := *structPointer_Bytes(base, prop.unrecField)
- if len(o.buf)+len(v) > maxMarshalSize {
- return ErrTooLarge
- }
- if len(v) > 0 {
- o.buf = append(o.buf, v...)
- }
- }
-
- return state.err
-}
-
-func size_struct(prop *StructProperties, base structPointer) (n int) {
- for _, i := range prop.order {
- p := prop.Prop[i]
- if p.size != nil {
- n += p.size(p, base)
- }
- }
-
- // Add unrecognized fields at the end.
- if prop.unrecField.IsValid() {
- v := *structPointer_Bytes(base, prop.unrecField)
- n += len(v)
- }
-
- // Factor in any oneof fields.
- if prop.oneofSizer != nil {
- m := structPointer_Interface(base, prop.stype).(Message)
- n += prop.oneofSizer(m)
- }
-
- return
-}
-
-var zeroes [20]byte // longer than any conceivable sizeVarint
-
-// Encode a struct, preceded by its encoded length (as a varint).
-func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error {
- return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state)
-}
-
-// Encode something, preceded by its encoded length (as a varint).
-func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error {
- iLen := len(o.buf)
- o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length
- iMsg := len(o.buf)
- err := enc()
- if err != nil && !state.shouldContinue(err, nil) {
- return err
- }
- lMsg := len(o.buf) - iMsg
- lLen := sizeVarint(uint64(lMsg))
- switch x := lLen - (iMsg - iLen); {
- case x > 0: // actual length is x bytes larger than the space we reserved
- // Move msg x bytes right.
- o.buf = append(o.buf, zeroes[:x]...)
- copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])
- case x < 0: // actual length is x bytes smaller than the space we reserved
- // Move msg x bytes left.
- copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])
- o.buf = o.buf[:len(o.buf)+x] // x is negative
- }
- // Encode the length in the reserved space.
- o.buf = o.buf[:iLen]
- o.EncodeVarint(uint64(lMsg))
- o.buf = o.buf[:len(o.buf)+lMsg]
- return state.err
-}
-
-// errorState maintains the first error that occurs and updates that error
-// with additional context.
-type errorState struct {
- err error
-}
-
-// shouldContinue reports whether encoding should continue upon encountering the
-// given error. If the error is RequiredNotSetError, shouldContinue returns true
-// and, if this is the first appearance of that error, remembers it for future
-// reporting.
-//
-// If prop is not nil, it may update any error with additional context about the
-// field with the error.
-func (s *errorState) shouldContinue(err error, prop *Properties) bool {
- // Ignore unset required fields.
- reqNotSet, ok := err.(*RequiredNotSetError)
- if !ok {
- return false
- }
- if s.err == nil {
- if prop != nil {
- err = &RequiredNotSetError{prop.Name + "." + reqNotSet.field}
- }
- s.err = err
- }
- return true
-}
diff --git a/vendor/src/github.com/golang/protobuf/proto/encode_test.go b/vendor/src/github.com/golang/protobuf/proto/encode_test.go
deleted file mode 100644
index 0b36a0e..0000000
--- a/vendor/src/github.com/golang/protobuf/proto/encode_test.go
+++ /dev/null
@@ -1,83 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto_test
-
-import (
- "strconv"
- "testing"
-
- "github.com/golang/protobuf/proto"
- tpb "github.com/golang/protobuf/proto/proto3_proto"
- "github.com/golang/protobuf/ptypes"
-)
-
-var (
- blackhole []byte
-)
-
-// BenchmarkAny creates increasingly large arbitrary Any messages. The type is always the
-// same.
-func BenchmarkAny(b *testing.B) {
- data := make([]byte, 1<<20)
- quantum := 1 << 10
- for i := uint(0); i <= 10; i++ {
- b.Run(strconv.Itoa(quantum<= len(o.buf) {
- break
- }
- }
- return value.Interface(), nil
-}
-
-// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
-// The returned slice has the same length as es; missing extensions will appear as nil elements.
-func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
- epb, ok := extendable(pb)
- if !ok {
- return nil, errors.New("proto: not an extendable proto")
- }
- extensions = make([]interface{}, len(es))
- for i, e := range es {
- extensions[i], err = GetExtension(epb, e)
- if err == ErrMissingExtension {
- err = nil
- }
- if err != nil {
- return
- }
- }
- return
-}
-
-// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order.
-// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing
-// just the Field field, which defines the extension's field number.
-func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
- epb, ok := extendable(pb)
- if !ok {
- return nil, fmt.Errorf("proto: %T is not an extendable proto.Message", pb)
- }
- registeredExtensions := RegisteredExtensions(pb)
-
- emap, mu := epb.extensionsRead()
- if emap == nil {
- return nil, nil
- }
- mu.Lock()
- defer mu.Unlock()
- extensions := make([]*ExtensionDesc, 0, len(emap))
- for extid, e := range emap {
- desc := e.desc
- if desc == nil {
- desc = registeredExtensions[extid]
- if desc == nil {
- desc = &ExtensionDesc{Field: extid}
- }
- }
-
- extensions = append(extensions, desc)
- }
- return extensions, nil
-}
-
-// SetExtension sets the specified extension of pb to the specified value.
-func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
- epb, ok := extendable(pb)
- if !ok {
- return errors.New("proto: not an extendable proto")
- }
- if err := checkExtensionTypes(epb, extension); err != nil {
- return err
- }
- typ := reflect.TypeOf(extension.ExtensionType)
- if typ != reflect.TypeOf(value) {
- return errors.New("proto: bad extension value type")
- }
- // nil extension values need to be caught early, because the
- // encoder can't distinguish an ErrNil due to a nil extension
- // from an ErrNil due to a missing field. Extensions are
- // always optional, so the encoder would just swallow the error
- // and drop all the extensions from the encoded message.
- if reflect.ValueOf(value).IsNil() {
- return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
- }
-
- extmap := epb.extensionsWrite()
- extmap[extension.Field] = Extension{desc: extension, value: value}
- return nil
-}
-
-// ClearAllExtensions clears all extensions from pb.
-func ClearAllExtensions(pb Message) {
- epb, ok := extendable(pb)
- if !ok {
- return
- }
- m := epb.extensionsWrite()
- for k := range m {
- delete(m, k)
- }
-}
-
-// A global registry of extensions.
-// The generated code will register the generated descriptors by calling RegisterExtension.
-
-var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)
-
-// RegisterExtension is called from the generated code.
-func RegisterExtension(desc *ExtensionDesc) {
- st := reflect.TypeOf(desc.ExtendedType).Elem()
- m := extensionMaps[st]
- if m == nil {
- m = make(map[int32]*ExtensionDesc)
- extensionMaps[st] = m
- }
- if _, ok := m[desc.Field]; ok {
- panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field)))
- }
- m[desc.Field] = desc
-}
-
-// RegisteredExtensions returns a map of the registered extensions of a
-// protocol buffer struct, indexed by the extension number.
-// The argument pb should be a nil pointer to the struct type.
-func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
- return extensionMaps[reflect.TypeOf(pb).Elem()]
-}
diff --git a/vendor/src/github.com/golang/protobuf/proto/extensions_test.go b/vendor/src/github.com/golang/protobuf/proto/extensions_test.go
deleted file mode 100644
index 403d7c6..0000000
--- a/vendor/src/github.com/golang/protobuf/proto/extensions_test.go
+++ /dev/null
@@ -1,508 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2014 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto_test
-
-import (
- "bytes"
- "fmt"
- "reflect"
- "sort"
- "testing"
-
- "github.com/golang/protobuf/proto"
- pb "github.com/golang/protobuf/proto/testdata"
-)
-
-func TestGetExtensionsWithMissingExtensions(t *testing.T) {
- msg := &pb.MyMessage{}
- ext1 := &pb.Ext{}
- if err := proto.SetExtension(msg, pb.E_Ext_More, ext1); err != nil {
- t.Fatalf("Could not set ext1: %s", err)
- }
- exts, err := proto.GetExtensions(msg, []*proto.ExtensionDesc{
- pb.E_Ext_More,
- pb.E_Ext_Text,
- })
- if err != nil {
- t.Fatalf("GetExtensions() failed: %s", err)
- }
- if exts[0] != ext1 {
- t.Errorf("ext1 not in returned extensions: %T %v", exts[0], exts[0])
- }
- if exts[1] != nil {
- t.Errorf("ext2 in returned extensions: %T %v", exts[1], exts[1])
- }
-}
-
-func TestExtensionDescsWithMissingExtensions(t *testing.T) {
- msg := &pb.MyMessage{Count: proto.Int32(0)}
- extdesc1 := pb.E_Ext_More
- if descs, err := proto.ExtensionDescs(msg); len(descs) != 0 || err != nil {
- t.Errorf("proto.ExtensionDescs: got %d descs, error %v; want 0, nil", len(descs), err)
- }
-
- ext1 := &pb.Ext{}
- if err := proto.SetExtension(msg, extdesc1, ext1); err != nil {
- t.Fatalf("Could not set ext1: %s", err)
- }
- extdesc2 := &proto.ExtensionDesc{
- ExtendedType: (*pb.MyMessage)(nil),
- ExtensionType: (*bool)(nil),
- Field: 123456789,
- Name: "a.b",
- Tag: "varint,123456789,opt",
- }
- ext2 := proto.Bool(false)
- if err := proto.SetExtension(msg, extdesc2, ext2); err != nil {
- t.Fatalf("Could not set ext2: %s", err)
- }
-
- b, err := proto.Marshal(msg)
- if err != nil {
- t.Fatalf("Could not marshal msg: %v", err)
- }
- if err := proto.Unmarshal(b, msg); err != nil {
- t.Fatalf("Could not unmarshal into msg: %v", err)
- }
-
- descs, err := proto.ExtensionDescs(msg)
- if err != nil {
- t.Fatalf("proto.ExtensionDescs: got error %v", err)
- }
- sortExtDescs(descs)
- wantDescs := []*proto.ExtensionDesc{extdesc1, &proto.ExtensionDesc{Field: extdesc2.Field}}
- if !reflect.DeepEqual(descs, wantDescs) {
- t.Errorf("proto.ExtensionDescs(msg) sorted extension ids: got %+v, want %+v", descs, wantDescs)
- }
-}
-
-type ExtensionDescSlice []*proto.ExtensionDesc
-
-func (s ExtensionDescSlice) Len() int { return len(s) }
-func (s ExtensionDescSlice) Less(i, j int) bool { return s[i].Field < s[j].Field }
-func (s ExtensionDescSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-
-func sortExtDescs(s []*proto.ExtensionDesc) {
- sort.Sort(ExtensionDescSlice(s))
-}
-
-func TestGetExtensionStability(t *testing.T) {
- check := func(m *pb.MyMessage) bool {
- ext1, err := proto.GetExtension(m, pb.E_Ext_More)
- if err != nil {
- t.Fatalf("GetExtension() failed: %s", err)
- }
- ext2, err := proto.GetExtension(m, pb.E_Ext_More)
- if err != nil {
- t.Fatalf("GetExtension() failed: %s", err)
- }
- return ext1 == ext2
- }
- msg := &pb.MyMessage{Count: proto.Int32(4)}
- ext0 := &pb.Ext{}
- if err := proto.SetExtension(msg, pb.E_Ext_More, ext0); err != nil {
- t.Fatalf("Could not set ext1: %s", ext0)
- }
- if !check(msg) {
- t.Errorf("GetExtension() not stable before marshaling")
- }
- bb, err := proto.Marshal(msg)
- if err != nil {
- t.Fatalf("Marshal() failed: %s", err)
- }
- msg1 := &pb.MyMessage{}
- err = proto.Unmarshal(bb, msg1)
- if err != nil {
- t.Fatalf("Unmarshal() failed: %s", err)
- }
- if !check(msg1) {
- t.Errorf("GetExtension() not stable after unmarshaling")
- }
-}
-
-func TestGetExtensionDefaults(t *testing.T) {
- var setFloat64 float64 = 1
- var setFloat32 float32 = 2
- var setInt32 int32 = 3
- var setInt64 int64 = 4
- var setUint32 uint32 = 5
- var setUint64 uint64 = 6
- var setBool = true
- var setBool2 = false
- var setString = "Goodnight string"
- var setBytes = []byte("Goodnight bytes")
- var setEnum = pb.DefaultsMessage_TWO
-
- type testcase struct {
- ext *proto.ExtensionDesc // Extension we are testing.
- want interface{} // Expected value of extension, or nil (meaning that GetExtension will fail).
- def interface{} // Expected value of extension after ClearExtension().
- }
- tests := []testcase{
- {pb.E_NoDefaultDouble, setFloat64, nil},
- {pb.E_NoDefaultFloat, setFloat32, nil},
- {pb.E_NoDefaultInt32, setInt32, nil},
- {pb.E_NoDefaultInt64, setInt64, nil},
- {pb.E_NoDefaultUint32, setUint32, nil},
- {pb.E_NoDefaultUint64, setUint64, nil},
- {pb.E_NoDefaultSint32, setInt32, nil},
- {pb.E_NoDefaultSint64, setInt64, nil},
- {pb.E_NoDefaultFixed32, setUint32, nil},
- {pb.E_NoDefaultFixed64, setUint64, nil},
- {pb.E_NoDefaultSfixed32, setInt32, nil},
- {pb.E_NoDefaultSfixed64, setInt64, nil},
- {pb.E_NoDefaultBool, setBool, nil},
- {pb.E_NoDefaultBool, setBool2, nil},
- {pb.E_NoDefaultString, setString, nil},
- {pb.E_NoDefaultBytes, setBytes, nil},
- {pb.E_NoDefaultEnum, setEnum, nil},
- {pb.E_DefaultDouble, setFloat64, float64(3.1415)},
- {pb.E_DefaultFloat, setFloat32, float32(3.14)},
- {pb.E_DefaultInt32, setInt32, int32(42)},
- {pb.E_DefaultInt64, setInt64, int64(43)},
- {pb.E_DefaultUint32, setUint32, uint32(44)},
- {pb.E_DefaultUint64, setUint64, uint64(45)},
- {pb.E_DefaultSint32, setInt32, int32(46)},
- {pb.E_DefaultSint64, setInt64, int64(47)},
- {pb.E_DefaultFixed32, setUint32, uint32(48)},
- {pb.E_DefaultFixed64, setUint64, uint64(49)},
- {pb.E_DefaultSfixed32, setInt32, int32(50)},
- {pb.E_DefaultSfixed64, setInt64, int64(51)},
- {pb.E_DefaultBool, setBool, true},
- {pb.E_DefaultBool, setBool2, true},
- {pb.E_DefaultString, setString, "Hello, string"},
- {pb.E_DefaultBytes, setBytes, []byte("Hello, bytes")},
- {pb.E_DefaultEnum, setEnum, pb.DefaultsMessage_ONE},
- }
-
- checkVal := func(test testcase, msg *pb.DefaultsMessage, valWant interface{}) error {
- val, err := proto.GetExtension(msg, test.ext)
- if err != nil {
- if valWant != nil {
- return fmt.Errorf("GetExtension(): %s", err)
- }
- if want := proto.ErrMissingExtension; err != want {
- return fmt.Errorf("Unexpected error: got %v, want %v", err, want)
- }
- return nil
- }
-
- // All proto2 extension values are either a pointer to a value or a slice of values.
- ty := reflect.TypeOf(val)
- tyWant := reflect.TypeOf(test.ext.ExtensionType)
- if got, want := ty, tyWant; got != want {
- return fmt.Errorf("unexpected reflect.TypeOf(): got %v want %v", got, want)
- }
- tye := ty.Elem()
- tyeWant := tyWant.Elem()
- if got, want := tye, tyeWant; got != want {
- return fmt.Errorf("unexpected reflect.TypeOf().Elem(): got %v want %v", got, want)
- }
-
- // Check the name of the type of the value.
- // If it is an enum it will be type int32 with the name of the enum.
- if got, want := tye.Name(), tye.Name(); got != want {
- return fmt.Errorf("unexpected reflect.TypeOf().Elem().Name(): got %v want %v", got, want)
- }
-
- // Check that value is what we expect.
- // If we have a pointer in val, get the value it points to.
- valExp := val
- if ty.Kind() == reflect.Ptr {
- valExp = reflect.ValueOf(val).Elem().Interface()
- }
- if got, want := valExp, valWant; !reflect.DeepEqual(got, want) {
- return fmt.Errorf("unexpected reflect.DeepEqual(): got %v want %v", got, want)
- }
-
- return nil
- }
-
- setTo := func(test testcase) interface{} {
- setTo := reflect.ValueOf(test.want)
- if typ := reflect.TypeOf(test.ext.ExtensionType); typ.Kind() == reflect.Ptr {
- setTo = reflect.New(typ).Elem()
- setTo.Set(reflect.New(setTo.Type().Elem()))
- setTo.Elem().Set(reflect.ValueOf(test.want))
- }
- return setTo.Interface()
- }
-
- for _, test := range tests {
- msg := &pb.DefaultsMessage{}
- name := test.ext.Name
-
- // Check the initial value.
- if err := checkVal(test, msg, test.def); err != nil {
- t.Errorf("%s: %v", name, err)
- }
-
- // Set the per-type value and check value.
- name = fmt.Sprintf("%s (set to %T %v)", name, test.want, test.want)
- if err := proto.SetExtension(msg, test.ext, setTo(test)); err != nil {
- t.Errorf("%s: SetExtension(): %v", name, err)
- continue
- }
- if err := checkVal(test, msg, test.want); err != nil {
- t.Errorf("%s: %v", name, err)
- continue
- }
-
- // Set and check the value.
- name += " (cleared)"
- proto.ClearExtension(msg, test.ext)
- if err := checkVal(test, msg, test.def); err != nil {
- t.Errorf("%s: %v", name, err)
- }
- }
-}
-
-func TestExtensionsRoundTrip(t *testing.T) {
- msg := &pb.MyMessage{}
- ext1 := &pb.Ext{
- Data: proto.String("hi"),
- }
- ext2 := &pb.Ext{
- Data: proto.String("there"),
- }
- exists := proto.HasExtension(msg, pb.E_Ext_More)
- if exists {
- t.Error("Extension More present unexpectedly")
- }
- if err := proto.SetExtension(msg, pb.E_Ext_More, ext1); err != nil {
- t.Error(err)
- }
- if err := proto.SetExtension(msg, pb.E_Ext_More, ext2); err != nil {
- t.Error(err)
- }
- e, err := proto.GetExtension(msg, pb.E_Ext_More)
- if err != nil {
- t.Error(err)
- }
- x, ok := e.(*pb.Ext)
- if !ok {
- t.Errorf("e has type %T, expected testdata.Ext", e)
- } else if *x.Data != "there" {
- t.Errorf("SetExtension failed to overwrite, got %+v, not 'there'", x)
- }
- proto.ClearExtension(msg, pb.E_Ext_More)
- if _, err = proto.GetExtension(msg, pb.E_Ext_More); err != proto.ErrMissingExtension {
- t.Errorf("got %v, expected ErrMissingExtension", e)
- }
- if _, err := proto.GetExtension(msg, pb.E_X215); err == nil {
- t.Error("expected bad extension error, got nil")
- }
- if err := proto.SetExtension(msg, pb.E_X215, 12); err == nil {
- t.Error("expected extension err")
- }
- if err := proto.SetExtension(msg, pb.E_Ext_More, 12); err == nil {
- t.Error("expected some sort of type mismatch error, got nil")
- }
-}
-
-func TestNilExtension(t *testing.T) {
- msg := &pb.MyMessage{
- Count: proto.Int32(1),
- }
- if err := proto.SetExtension(msg, pb.E_Ext_Text, proto.String("hello")); err != nil {
- t.Fatal(err)
- }
- if err := proto.SetExtension(msg, pb.E_Ext_More, (*pb.Ext)(nil)); err == nil {
- t.Error("expected SetExtension to fail due to a nil extension")
- } else if want := "proto: SetExtension called with nil value of type *testdata.Ext"; err.Error() != want {
- t.Errorf("expected error %v, got %v", want, err)
- }
- // Note: if the behavior of Marshal is ever changed to ignore nil extensions, update
- // this test to verify that E_Ext_Text is properly propagated through marshal->unmarshal.
-}
-
-func TestMarshalUnmarshalRepeatedExtension(t *testing.T) {
- // Add a repeated extension to the result.
- tests := []struct {
- name string
- ext []*pb.ComplexExtension
- }{
- {
- "two fields",
- []*pb.ComplexExtension{
- {First: proto.Int32(7)},
- {Second: proto.Int32(11)},
- },
- },
- {
- "repeated field",
- []*pb.ComplexExtension{
- {Third: []int32{1000}},
- {Third: []int32{2000}},
- },
- },
- {
- "two fields and repeated field",
- []*pb.ComplexExtension{
- {Third: []int32{1000}},
- {First: proto.Int32(9)},
- {Second: proto.Int32(21)},
- {Third: []int32{2000}},
- },
- },
- }
- for _, test := range tests {
- // Marshal message with a repeated extension.
- msg1 := new(pb.OtherMessage)
- err := proto.SetExtension(msg1, pb.E_RComplex, test.ext)
- if err != nil {
- t.Fatalf("[%s] Error setting extension: %v", test.name, err)
- }
- b, err := proto.Marshal(msg1)
- if err != nil {
- t.Fatalf("[%s] Error marshaling message: %v", test.name, err)
- }
-
- // Unmarshal and read the merged proto.
- msg2 := new(pb.OtherMessage)
- err = proto.Unmarshal(b, msg2)
- if err != nil {
- t.Fatalf("[%s] Error unmarshaling message: %v", test.name, err)
- }
- e, err := proto.GetExtension(msg2, pb.E_RComplex)
- if err != nil {
- t.Fatalf("[%s] Error getting extension: %v", test.name, err)
- }
- ext := e.([]*pb.ComplexExtension)
- if ext == nil {
- t.Fatalf("[%s] Invalid extension", test.name)
- }
- if !reflect.DeepEqual(ext, test.ext) {
- t.Errorf("[%s] Wrong value for ComplexExtension: got: %v want: %v\n", test.name, ext, test.ext)
- }
- }
-}
-
-func TestUnmarshalRepeatingNonRepeatedExtension(t *testing.T) {
- // We may see multiple instances of the same extension in the wire
- // format. For example, the proto compiler may encode custom options in
- // this way. Here, we verify that we merge the extensions together.
- tests := []struct {
- name string
- ext []*pb.ComplexExtension
- }{
- {
- "two fields",
- []*pb.ComplexExtension{
- {First: proto.Int32(7)},
- {Second: proto.Int32(11)},
- },
- },
- {
- "repeated field",
- []*pb.ComplexExtension{
- {Third: []int32{1000}},
- {Third: []int32{2000}},
- },
- },
- {
- "two fields and repeated field",
- []*pb.ComplexExtension{
- {Third: []int32{1000}},
- {First: proto.Int32(9)},
- {Second: proto.Int32(21)},
- {Third: []int32{2000}},
- },
- },
- }
- for _, test := range tests {
- var buf bytes.Buffer
- var want pb.ComplexExtension
-
- // Generate a serialized representation of a repeated extension
- // by catenating bytes together.
- for i, e := range test.ext {
- // Merge to create the wanted proto.
- proto.Merge(&want, e)
-
- // serialize the message
- msg := new(pb.OtherMessage)
- err := proto.SetExtension(msg, pb.E_Complex, e)
- if err != nil {
- t.Fatalf("[%s] Error setting extension %d: %v", test.name, i, err)
- }
- b, err := proto.Marshal(msg)
- if err != nil {
- t.Fatalf("[%s] Error marshaling message %d: %v", test.name, i, err)
- }
- buf.Write(b)
- }
-
- // Unmarshal and read the merged proto.
- msg2 := new(pb.OtherMessage)
- err := proto.Unmarshal(buf.Bytes(), msg2)
- if err != nil {
- t.Fatalf("[%s] Error unmarshaling message: %v", test.name, err)
- }
- e, err := proto.GetExtension(msg2, pb.E_Complex)
- if err != nil {
- t.Fatalf("[%s] Error getting extension: %v", test.name, err)
- }
- ext := e.(*pb.ComplexExtension)
- if ext == nil {
- t.Fatalf("[%s] Invalid extension", test.name)
- }
- if !reflect.DeepEqual(*ext, want) {
- t.Errorf("[%s] Wrong value for ComplexExtension: got: %s want: %s\n", test.name, ext, want)
- }
- }
-}
-
-func TestClearAllExtensions(t *testing.T) {
- // unregistered extension
- desc := &proto.ExtensionDesc{
- ExtendedType: (*pb.MyMessage)(nil),
- ExtensionType: (*bool)(nil),
- Field: 101010100,
- Name: "emptyextension",
- Tag: "varint,0,opt",
- }
- m := &pb.MyMessage{}
- if proto.HasExtension(m, desc) {
- t.Errorf("proto.HasExtension(%s): got true, want false", proto.MarshalTextString(m))
- }
- if err := proto.SetExtension(m, desc, proto.Bool(true)); err != nil {
- t.Errorf("proto.SetExtension(m, desc, true): got error %q, want nil", err)
- }
- if !proto.HasExtension(m, desc) {
- t.Errorf("proto.HasExtension(%s): got false, want true", proto.MarshalTextString(m))
- }
- proto.ClearAllExtensions(m)
- if proto.HasExtension(m, desc) {
- t.Errorf("proto.HasExtension(%s): got true, want false", proto.MarshalTextString(m))
- }
-}
diff --git a/vendor/src/github.com/golang/protobuf/proto/lib.go b/vendor/src/github.com/golang/protobuf/proto/lib.go
deleted file mode 100644
index ac4ddbc..0000000
--- a/vendor/src/github.com/golang/protobuf/proto/lib.go
+++ /dev/null
@@ -1,898 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-/*
-Package proto converts data structures to and from the wire format of
-protocol buffers. It works in concert with the Go source code generated
-for .proto files by the protocol compiler.
-
-A summary of the properties of the protocol buffer interface
-for a protocol buffer variable v:
-
- - Names are turned from camel_case to CamelCase for export.
- - There are no methods on v to set fields; just treat
- them as structure fields.
- - There are getters that return a field's value if set,
- and return the field's default value if unset.
- The getters work even if the receiver is a nil message.
- - The zero value for a struct is its correct initialization state.
- All desired fields must be set before marshaling.
- - A Reset() method will restore a protobuf struct to its zero state.
- - Non-repeated fields are pointers to the values; nil means unset.
- That is, optional or required field int32 f becomes F *int32.
- - Repeated fields are slices.
- - Helper functions are available to aid the setting of fields.
- msg.Foo = proto.String("hello") // set field
- - Constants are defined to hold the default values of all fields that
- have them. They have the form Default_StructName_FieldName.
- Because the getter methods handle defaulted values,
- direct use of these constants should be rare.
- - Enums are given type names and maps from names to values.
- Enum values are prefixed by the enclosing message's name, or by the
- enum's type name if it is a top-level enum. Enum types have a String
- method, and a Enum method to assist in message construction.
- - Nested messages, groups and enums have type names prefixed with the name of
- the surrounding message type.
- - Extensions are given descriptor names that start with E_,
- followed by an underscore-delimited list of the nested messages
- that contain it (if any) followed by the CamelCased name of the
- extension field itself. HasExtension, ClearExtension, GetExtension
- and SetExtension are functions for manipulating extensions.
- - Oneof field sets are given a single field in their message,
- with distinguished wrapper types for each possible field value.
- - Marshal and Unmarshal are functions to encode and decode the wire format.
-
-When the .proto file specifies `syntax="proto3"`, there are some differences:
-
- - Non-repeated fields of non-message type are values instead of pointers.
- - Getters are only generated for message and oneof fields.
- - Enum types do not get an Enum method.
-
-The simplest way to describe this is to see an example.
-Given file test.proto, containing
-
- package example;
-
- enum FOO { X = 17; }
-
- message Test {
- required string label = 1;
- optional int32 type = 2 [default=77];
- repeated int64 reps = 3;
- optional group OptionalGroup = 4 {
- required string RequiredField = 5;
- }
- oneof union {
- int32 number = 6;
- string name = 7;
- }
- }
-
-The resulting file, test.pb.go, is:
-
- package example
-
- import proto "github.com/golang/protobuf/proto"
- import math "math"
-
- type FOO int32
- const (
- FOO_X FOO = 17
- )
- var FOO_name = map[int32]string{
- 17: "X",
- }
- var FOO_value = map[string]int32{
- "X": 17,
- }
-
- func (x FOO) Enum() *FOO {
- p := new(FOO)
- *p = x
- return p
- }
- func (x FOO) String() string {
- return proto.EnumName(FOO_name, int32(x))
- }
- func (x *FOO) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(FOO_value, data)
- if err != nil {
- return err
- }
- *x = FOO(value)
- return nil
- }
-
- type Test struct {
- Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
- Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
- Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
- Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
- // Types that are valid to be assigned to Union:
- // *Test_Number
- // *Test_Name
- Union isTest_Union `protobuf_oneof:"union"`
- XXX_unrecognized []byte `json:"-"`
- }
- func (m *Test) Reset() { *m = Test{} }
- func (m *Test) String() string { return proto.CompactTextString(m) }
- func (*Test) ProtoMessage() {}
-
- type isTest_Union interface {
- isTest_Union()
- }
-
- type Test_Number struct {
- Number int32 `protobuf:"varint,6,opt,name=number"`
- }
- type Test_Name struct {
- Name string `protobuf:"bytes,7,opt,name=name"`
- }
-
- func (*Test_Number) isTest_Union() {}
- func (*Test_Name) isTest_Union() {}
-
- func (m *Test) GetUnion() isTest_Union {
- if m != nil {
- return m.Union
- }
- return nil
- }
- const Default_Test_Type int32 = 77
-
- func (m *Test) GetLabel() string {
- if m != nil && m.Label != nil {
- return *m.Label
- }
- return ""
- }
-
- func (m *Test) GetType() int32 {
- if m != nil && m.Type != nil {
- return *m.Type
- }
- return Default_Test_Type
- }
-
- func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
- if m != nil {
- return m.Optionalgroup
- }
- return nil
- }
-
- type Test_OptionalGroup struct {
- RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
- }
- func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} }
- func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }
-
- func (m *Test_OptionalGroup) GetRequiredField() string {
- if m != nil && m.RequiredField != nil {
- return *m.RequiredField
- }
- return ""
- }
-
- func (m *Test) GetNumber() int32 {
- if x, ok := m.GetUnion().(*Test_Number); ok {
- return x.Number
- }
- return 0
- }
-
- func (m *Test) GetName() string {
- if x, ok := m.GetUnion().(*Test_Name); ok {
- return x.Name
- }
- return ""
- }
-
- func init() {
- proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
- }
-
-To create and play with a Test object:
-
- package main
-
- import (
- "log"
-
- "github.com/golang/protobuf/proto"
- pb "./example.pb"
- )
-
- func main() {
- test := &pb.Test{
- Label: proto.String("hello"),
- Type: proto.Int32(17),
- Reps: []int64{1, 2, 3},
- Optionalgroup: &pb.Test_OptionalGroup{
- RequiredField: proto.String("good bye"),
- },
- Union: &pb.Test_Name{"fred"},
- }
- data, err := proto.Marshal(test)
- if err != nil {
- log.Fatal("marshaling error: ", err)
- }
- newTest := &pb.Test{}
- err = proto.Unmarshal(data, newTest)
- if err != nil {
- log.Fatal("unmarshaling error: ", err)
- }
- // Now test and newTest contain the same data.
- if test.GetLabel() != newTest.GetLabel() {
- log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
- }
- // Use a type switch to determine which oneof was set.
- switch u := test.Union.(type) {
- case *pb.Test_Number: // u.Number contains the number.
- case *pb.Test_Name: // u.Name contains the string.
- }
- // etc.
- }
-*/
-package proto
-
-import (
- "encoding/json"
- "fmt"
- "log"
- "reflect"
- "sort"
- "strconv"
- "sync"
-)
-
-// Message is implemented by generated protocol buffer messages.
-type Message interface {
- Reset()
- String() string
- ProtoMessage()
-}
-
-// Stats records allocation details about the protocol buffer encoders
-// and decoders. Useful for tuning the library itself.
-type Stats struct {
- Emalloc uint64 // mallocs in encode
- Dmalloc uint64 // mallocs in decode
- Encode uint64 // number of encodes
- Decode uint64 // number of decodes
- Chit uint64 // number of cache hits
- Cmiss uint64 // number of cache misses
- Size uint64 // number of sizes
-}
-
-// Set to true to enable stats collection.
-const collectStats = false
-
-var stats Stats
-
-// GetStats returns a copy of the global Stats structure.
-func GetStats() Stats { return stats }
-
-// A Buffer is a buffer manager for marshaling and unmarshaling
-// protocol buffers. It may be reused between invocations to
-// reduce memory usage. It is not necessary to use a Buffer;
-// the global functions Marshal and Unmarshal create a
-// temporary Buffer and are fine for most applications.
-type Buffer struct {
- buf []byte // encode/decode byte stream
- index int // read point
-
- // pools of basic types to amortize allocation.
- bools []bool
- uint32s []uint32
- uint64s []uint64
-
- // extra pools, only used with pointer_reflect.go
- int32s []int32
- int64s []int64
- float32s []float32
- float64s []float64
-}
-
-// NewBuffer allocates a new Buffer and initializes its internal data to
-// the contents of the argument slice.
-func NewBuffer(e []byte) *Buffer {
- return &Buffer{buf: e}
-}
-
-// Reset resets the Buffer, ready for marshaling a new protocol buffer.
-func (p *Buffer) Reset() {
- p.buf = p.buf[0:0] // for reading/writing
- p.index = 0 // for reading
-}
-
-// SetBuf replaces the internal buffer with the slice,
-// ready for unmarshaling the contents of the slice.
-func (p *Buffer) SetBuf(s []byte) {
- p.buf = s
- p.index = 0
-}
-
-// Bytes returns the contents of the Buffer.
-func (p *Buffer) Bytes() []byte { return p.buf }
-
-/*
- * Helper routines for simplifying the creation of optional fields of basic type.
- */
-
-// Bool is a helper routine that allocates a new bool value
-// to store v and returns a pointer to it.
-func Bool(v bool) *bool {
- return &v
-}
-
-// Int32 is a helper routine that allocates a new int32 value
-// to store v and returns a pointer to it.
-func Int32(v int32) *int32 {
- return &v
-}
-
-// Int is a helper routine that allocates a new int32 value
-// to store v and returns a pointer to it, but unlike Int32
-// its argument value is an int.
-func Int(v int) *int32 {
- p := new(int32)
- *p = int32(v)
- return p
-}
-
-// Int64 is a helper routine that allocates a new int64 value
-// to store v and returns a pointer to it.
-func Int64(v int64) *int64 {
- return &v
-}
-
-// Float32 is a helper routine that allocates a new float32 value
-// to store v and returns a pointer to it.
-func Float32(v float32) *float32 {
- return &v
-}
-
-// Float64 is a helper routine that allocates a new float64 value
-// to store v and returns a pointer to it.
-func Float64(v float64) *float64 {
- return &v
-}
-
-// Uint32 is a helper routine that allocates a new uint32 value
-// to store v and returns a pointer to it.
-func Uint32(v uint32) *uint32 {
- return &v
-}
-
-// Uint64 is a helper routine that allocates a new uint64 value
-// to store v and returns a pointer to it.
-func Uint64(v uint64) *uint64 {
- return &v
-}
-
-// String is a helper routine that allocates a new string value
-// to store v and returns a pointer to it.
-func String(v string) *string {
- return &v
-}
-
-// EnumName is a helper function to simplify printing protocol buffer enums
-// by name. Given an enum map and a value, it returns a useful string.
-func EnumName(m map[int32]string, v int32) string {
- s, ok := m[v]
- if ok {
- return s
- }
- return strconv.Itoa(int(v))
-}
-
-// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
-// from their JSON-encoded representation. Given a map from the enum's symbolic
-// names to its int values, and a byte buffer containing the JSON-encoded
-// value, it returns an int32 that can be cast to the enum type by the caller.
-//
-// The function can deal with both JSON representations, numeric and symbolic.
-func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
- if data[0] == '"' {
- // New style: enums are strings.
- var repr string
- if err := json.Unmarshal(data, &repr); err != nil {
- return -1, err
- }
- val, ok := m[repr]
- if !ok {
- return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
- }
- return val, nil
- }
- // Old style: enums are ints.
- var val int32
- if err := json.Unmarshal(data, &val); err != nil {
- return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
- }
- return val, nil
-}
-
-// DebugPrint dumps the encoded data in b in a debugging format with a header
-// including the string s. Used in testing but made available for general debugging.
-func (p *Buffer) DebugPrint(s string, b []byte) {
- var u uint64
-
- obuf := p.buf
- index := p.index
- p.buf = b
- p.index = 0
- depth := 0
-
- fmt.Printf("\n--- %s ---\n", s)
-
-out:
- for {
- for i := 0; i < depth; i++ {
- fmt.Print(" ")
- }
-
- index := p.index
- if index == len(p.buf) {
- break
- }
-
- op, err := p.DecodeVarint()
- if err != nil {
- fmt.Printf("%3d: fetching op err %v\n", index, err)
- break out
- }
- tag := op >> 3
- wire := op & 7
-
- switch wire {
- default:
- fmt.Printf("%3d: t=%3d unknown wire=%d\n",
- index, tag, wire)
- break out
-
- case WireBytes:
- var r []byte
-
- r, err = p.DecodeRawBytes(false)
- if err != nil {
- break out
- }
- fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
- if len(r) <= 6 {
- for i := 0; i < len(r); i++ {
- fmt.Printf(" %.2x", r[i])
- }
- } else {
- for i := 0; i < 3; i++ {
- fmt.Printf(" %.2x", r[i])
- }
- fmt.Printf(" ..")
- for i := len(r) - 3; i < len(r); i++ {
- fmt.Printf(" %.2x", r[i])
- }
- }
- fmt.Printf("\n")
-
- case WireFixed32:
- u, err = p.DecodeFixed32()
- if err != nil {
- fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
- break out
- }
- fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
-
- case WireFixed64:
- u, err = p.DecodeFixed64()
- if err != nil {
- fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
- break out
- }
- fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
-
- case WireVarint:
- u, err = p.DecodeVarint()
- if err != nil {
- fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
- break out
- }
- fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
-
- case WireStartGroup:
- fmt.Printf("%3d: t=%3d start\n", index, tag)
- depth++
-
- case WireEndGroup:
- depth--
- fmt.Printf("%3d: t=%3d end\n", index, tag)
- }
- }
-
- if depth != 0 {
- fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
- }
- fmt.Printf("\n")
-
- p.buf = obuf
- p.index = index
-}
-
-// SetDefaults sets unset protocol buffer fields to their default values.
-// It only modifies fields that are both unset and have defined defaults.
-// It recursively sets default values in any non-nil sub-messages.
-func SetDefaults(pb Message) {
- setDefaults(reflect.ValueOf(pb), true, false)
-}
-
-// v is a pointer to a struct.
-func setDefaults(v reflect.Value, recur, zeros bool) {
- v = v.Elem()
-
- defaultMu.RLock()
- dm, ok := defaults[v.Type()]
- defaultMu.RUnlock()
- if !ok {
- dm = buildDefaultMessage(v.Type())
- defaultMu.Lock()
- defaults[v.Type()] = dm
- defaultMu.Unlock()
- }
-
- for _, sf := range dm.scalars {
- f := v.Field(sf.index)
- if !f.IsNil() {
- // field already set
- continue
- }
- dv := sf.value
- if dv == nil && !zeros {
- // no explicit default, and don't want to set zeros
- continue
- }
- fptr := f.Addr().Interface() // **T
- // TODO: Consider batching the allocations we do here.
- switch sf.kind {
- case reflect.Bool:
- b := new(bool)
- if dv != nil {
- *b = dv.(bool)
- }
- *(fptr.(**bool)) = b
- case reflect.Float32:
- f := new(float32)
- if dv != nil {
- *f = dv.(float32)
- }
- *(fptr.(**float32)) = f
- case reflect.Float64:
- f := new(float64)
- if dv != nil {
- *f = dv.(float64)
- }
- *(fptr.(**float64)) = f
- case reflect.Int32:
- // might be an enum
- if ft := f.Type(); ft != int32PtrType {
- // enum
- f.Set(reflect.New(ft.Elem()))
- if dv != nil {
- f.Elem().SetInt(int64(dv.(int32)))
- }
- } else {
- // int32 field
- i := new(int32)
- if dv != nil {
- *i = dv.(int32)
- }
- *(fptr.(**int32)) = i
- }
- case reflect.Int64:
- i := new(int64)
- if dv != nil {
- *i = dv.(int64)
- }
- *(fptr.(**int64)) = i
- case reflect.String:
- s := new(string)
- if dv != nil {
- *s = dv.(string)
- }
- *(fptr.(**string)) = s
- case reflect.Uint8:
- // exceptional case: []byte
- var b []byte
- if dv != nil {
- db := dv.([]byte)
- b = make([]byte, len(db))
- copy(b, db)
- } else {
- b = []byte{}
- }
- *(fptr.(*[]byte)) = b
- case reflect.Uint32:
- u := new(uint32)
- if dv != nil {
- *u = dv.(uint32)
- }
- *(fptr.(**uint32)) = u
- case reflect.Uint64:
- u := new(uint64)
- if dv != nil {
- *u = dv.(uint64)
- }
- *(fptr.(**uint64)) = u
- default:
- log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
- }
- }
-
- for _, ni := range dm.nested {
- f := v.Field(ni)
- // f is *T or []*T or map[T]*T
- switch f.Kind() {
- case reflect.Ptr:
- if f.IsNil() {
- continue
- }
- setDefaults(f, recur, zeros)
-
- case reflect.Slice:
- for i := 0; i < f.Len(); i++ {
- e := f.Index(i)
- if e.IsNil() {
- continue
- }
- setDefaults(e, recur, zeros)
- }
-
- case reflect.Map:
- for _, k := range f.MapKeys() {
- e := f.MapIndex(k)
- if e.IsNil() {
- continue
- }
- setDefaults(e, recur, zeros)
- }
- }
- }
-}
-
-var (
- // defaults maps a protocol buffer struct type to a slice of the fields,
- // with its scalar fields set to their proto-declared non-zero default values.
- defaultMu sync.RWMutex
- defaults = make(map[reflect.Type]defaultMessage)
-
- int32PtrType = reflect.TypeOf((*int32)(nil))
-)
-
-// defaultMessage represents information about the default values of a message.
-type defaultMessage struct {
- scalars []scalarField
- nested []int // struct field index of nested messages
-}
-
-type scalarField struct {
- index int // struct field index
- kind reflect.Kind // element type (the T in *T or []T)
- value interface{} // the proto-declared default value, or nil
-}
-
-// t is a struct type.
-func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
- sprop := GetProperties(t)
- for _, prop := range sprop.Prop {
- fi, ok := sprop.decoderTags.get(prop.Tag)
- if !ok {
- // XXX_unrecognized
- continue
- }
- ft := t.Field(fi).Type
-
- sf, nested, err := fieldDefault(ft, prop)
- switch {
- case err != nil:
- log.Print(err)
- case nested:
- dm.nested = append(dm.nested, fi)
- case sf != nil:
- sf.index = fi
- dm.scalars = append(dm.scalars, *sf)
- }
- }
-
- return dm
-}
-
-// fieldDefault returns the scalarField for field type ft.
-// sf will be nil if the field can not have a default.
-// nestedMessage will be true if this is a nested message.
-// Note that sf.index is not set on return.
-func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
- var canHaveDefault bool
- switch ft.Kind() {
- case reflect.Ptr:
- if ft.Elem().Kind() == reflect.Struct {
- nestedMessage = true
- } else {
- canHaveDefault = true // proto2 scalar field
- }
-
- case reflect.Slice:
- switch ft.Elem().Kind() {
- case reflect.Ptr:
- nestedMessage = true // repeated message
- case reflect.Uint8:
- canHaveDefault = true // bytes field
- }
-
- case reflect.Map:
- if ft.Elem().Kind() == reflect.Ptr {
- nestedMessage = true // map with message values
- }
- }
-
- if !canHaveDefault {
- if nestedMessage {
- return nil, true, nil
- }
- return nil, false, nil
- }
-
- // We now know that ft is a pointer or slice.
- sf = &scalarField{kind: ft.Elem().Kind()}
-
- // scalar fields without defaults
- if !prop.HasDefault {
- return sf, false, nil
- }
-
- // a scalar field: either *T or []byte
- switch ft.Elem().Kind() {
- case reflect.Bool:
- x, err := strconv.ParseBool(prop.Default)
- if err != nil {
- return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
- }
- sf.value = x
- case reflect.Float32:
- x, err := strconv.ParseFloat(prop.Default, 32)
- if err != nil {
- return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
- }
- sf.value = float32(x)
- case reflect.Float64:
- x, err := strconv.ParseFloat(prop.Default, 64)
- if err != nil {
- return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
- }
- sf.value = x
- case reflect.Int32:
- x, err := strconv.ParseInt(prop.Default, 10, 32)
- if err != nil {
- return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
- }
- sf.value = int32(x)
- case reflect.Int64:
- x, err := strconv.ParseInt(prop.Default, 10, 64)
- if err != nil {
- return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
- }
- sf.value = x
- case reflect.String:
- sf.value = prop.Default
- case reflect.Uint8:
- // []byte (not *uint8)
- sf.value = []byte(prop.Default)
- case reflect.Uint32:
- x, err := strconv.ParseUint(prop.Default, 10, 32)
- if err != nil {
- return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
- }
- sf.value = uint32(x)
- case reflect.Uint64:
- x, err := strconv.ParseUint(prop.Default, 10, 64)
- if err != nil {
- return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
- }
- sf.value = x
- default:
- return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
- }
-
- return sf, false, nil
-}
-
-// Map fields may have key types of non-float scalars, strings and enums.
-// The easiest way to sort them in some deterministic order is to use fmt.
-// If this turns out to be inefficient we can always consider other options,
-// such as doing a Schwartzian transform.
-
-func mapKeys(vs []reflect.Value) sort.Interface {
- s := mapKeySorter{
- vs: vs,
- // default Less function: textual comparison
- less: func(a, b reflect.Value) bool {
- return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface())
- },
- }
-
- // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps;
- // numeric keys are sorted numerically.
- if len(vs) == 0 {
- return s
- }
- switch vs[0].Kind() {
- case reflect.Int32, reflect.Int64:
- s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
- case reflect.Uint32, reflect.Uint64:
- s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
- }
-
- return s
-}
-
-type mapKeySorter struct {
- vs []reflect.Value
- less func(a, b reflect.Value) bool
-}
-
-func (s mapKeySorter) Len() int { return len(s.vs) }
-func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
-func (s mapKeySorter) Less(i, j int) bool {
- return s.less(s.vs[i], s.vs[j])
-}
-
-// isProto3Zero reports whether v is a zero proto3 value.
-func isProto3Zero(v reflect.Value) bool {
- switch v.Kind() {
- case reflect.Bool:
- return !v.Bool()
- case reflect.Int32, reflect.Int64:
- return v.Int() == 0
- case reflect.Uint32, reflect.Uint64:
- return v.Uint() == 0
- case reflect.Float32, reflect.Float64:
- return v.Float() == 0
- case reflect.String:
- return v.String() == ""
- }
- return false
-}
-
-// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
-// to assert that that code is compatible with this version of the proto package.
-const ProtoPackageIsVersion2 = true
-
-// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
-// to assert that that code is compatible with this version of the proto package.
-const ProtoPackageIsVersion1 = true
diff --git a/vendor/src/github.com/golang/protobuf/proto/message_set.go b/vendor/src/github.com/golang/protobuf/proto/message_set.go
deleted file mode 100644
index fd982de..0000000
--- a/vendor/src/github.com/golang/protobuf/proto/message_set.go
+++ /dev/null
@@ -1,311 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-/*
- * Support for message sets.
- */
-
-import (
- "bytes"
- "encoding/json"
- "errors"
- "fmt"
- "reflect"
- "sort"
-)
-
-// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
-// A message type ID is required for storing a protocol buffer in a message set.
-var errNoMessageTypeID = errors.New("proto does not have a message type ID")
-
-// The first two types (_MessageSet_Item and messageSet)
-// model what the protocol compiler produces for the following protocol message:
-// message MessageSet {
-// repeated group Item = 1 {
-// required int32 type_id = 2;
-// required string message = 3;
-// };
-// }
-// That is the MessageSet wire format. We can't use a proto to generate these
-// because that would introduce a circular dependency between it and this package.
-
-type _MessageSet_Item struct {
- TypeId *int32 `protobuf:"varint,2,req,name=type_id"`
- Message []byte `protobuf:"bytes,3,req,name=message"`
-}
-
-type messageSet struct {
- Item []*_MessageSet_Item `protobuf:"group,1,rep"`
- XXX_unrecognized []byte
- // TODO: caching?
-}
-
-// Make sure messageSet is a Message.
-var _ Message = (*messageSet)(nil)
-
-// messageTypeIder is an interface satisfied by a protocol buffer type
-// that may be stored in a MessageSet.
-type messageTypeIder interface {
- MessageTypeId() int32
-}
-
-func (ms *messageSet) find(pb Message) *_MessageSet_Item {
- mti, ok := pb.(messageTypeIder)
- if !ok {
- return nil
- }
- id := mti.MessageTypeId()
- for _, item := range ms.Item {
- if *item.TypeId == id {
- return item
- }
- }
- return nil
-}
-
-func (ms *messageSet) Has(pb Message) bool {
- if ms.find(pb) != nil {
- return true
- }
- return false
-}
-
-func (ms *messageSet) Unmarshal(pb Message) error {
- if item := ms.find(pb); item != nil {
- return Unmarshal(item.Message, pb)
- }
- if _, ok := pb.(messageTypeIder); !ok {
- return errNoMessageTypeID
- }
- return nil // TODO: return error instead?
-}
-
-func (ms *messageSet) Marshal(pb Message) error {
- msg, err := Marshal(pb)
- if err != nil {
- return err
- }
- if item := ms.find(pb); item != nil {
- // reuse existing item
- item.Message = msg
- return nil
- }
-
- mti, ok := pb.(messageTypeIder)
- if !ok {
- return errNoMessageTypeID
- }
-
- mtid := mti.MessageTypeId()
- ms.Item = append(ms.Item, &_MessageSet_Item{
- TypeId: &mtid,
- Message: msg,
- })
- return nil
-}
-
-func (ms *messageSet) Reset() { *ms = messageSet{} }
-func (ms *messageSet) String() string { return CompactTextString(ms) }
-func (*messageSet) ProtoMessage() {}
-
-// Support for the message_set_wire_format message option.
-
-func skipVarint(buf []byte) []byte {
- i := 0
- for ; buf[i]&0x80 != 0; i++ {
- }
- return buf[i+1:]
-}
-
-// MarshalMessageSet encodes the extension map represented by m in the message set wire format.
-// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
-func MarshalMessageSet(exts interface{}) ([]byte, error) {
- var m map[int32]Extension
- switch exts := exts.(type) {
- case *XXX_InternalExtensions:
- if err := encodeExtensions(exts); err != nil {
- return nil, err
- }
- m, _ = exts.extensionsRead()
- case map[int32]Extension:
- if err := encodeExtensionsMap(exts); err != nil {
- return nil, err
- }
- m = exts
- default:
- return nil, errors.New("proto: not an extension map")
- }
-
- // Sort extension IDs to provide a deterministic encoding.
- // See also enc_map in encode.go.
- ids := make([]int, 0, len(m))
- for id := range m {
- ids = append(ids, int(id))
- }
- sort.Ints(ids)
-
- ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))}
- for _, id := range ids {
- e := m[int32(id)]
- // Remove the wire type and field number varint, as well as the length varint.
- msg := skipVarint(skipVarint(e.enc))
-
- ms.Item = append(ms.Item, &_MessageSet_Item{
- TypeId: Int32(int32(id)),
- Message: msg,
- })
- }
- return Marshal(ms)
-}
-
-// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
-// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
-func UnmarshalMessageSet(buf []byte, exts interface{}) error {
- var m map[int32]Extension
- switch exts := exts.(type) {
- case *XXX_InternalExtensions:
- m = exts.extensionsWrite()
- case map[int32]Extension:
- m = exts
- default:
- return errors.New("proto: not an extension map")
- }
-
- ms := new(messageSet)
- if err := Unmarshal(buf, ms); err != nil {
- return err
- }
- for _, item := range ms.Item {
- id := *item.TypeId
- msg := item.Message
-
- // Restore wire type and field number varint, plus length varint.
- // Be careful to preserve duplicate items.
- b := EncodeVarint(uint64(id)<<3 | WireBytes)
- if ext, ok := m[id]; ok {
- // Existing data; rip off the tag and length varint
- // so we join the new data correctly.
- // We can assume that ext.enc is set because we are unmarshaling.
- o := ext.enc[len(b):] // skip wire type and field number
- _, n := DecodeVarint(o) // calculate length of length varint
- o = o[n:] // skip length varint
- msg = append(o, msg...) // join old data and new data
- }
- b = append(b, EncodeVarint(uint64(len(msg)))...)
- b = append(b, msg...)
-
- m[id] = Extension{enc: b}
- }
- return nil
-}
-
-// MarshalMessageSetJSON encodes the extension map represented by m in JSON format.
-// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
-func MarshalMessageSetJSON(exts interface{}) ([]byte, error) {
- var m map[int32]Extension
- switch exts := exts.(type) {
- case *XXX_InternalExtensions:
- m, _ = exts.extensionsRead()
- case map[int32]Extension:
- m = exts
- default:
- return nil, errors.New("proto: not an extension map")
- }
- var b bytes.Buffer
- b.WriteByte('{')
-
- // Process the map in key order for deterministic output.
- ids := make([]int32, 0, len(m))
- for id := range m {
- ids = append(ids, id)
- }
- sort.Sort(int32Slice(ids)) // int32Slice defined in text.go
-
- for i, id := range ids {
- ext := m[id]
- if i > 0 {
- b.WriteByte(',')
- }
-
- msd, ok := messageSetMap[id]
- if !ok {
- // Unknown type; we can't render it, so skip it.
- continue
- }
- fmt.Fprintf(&b, `"[%s]":`, msd.name)
-
- x := ext.value
- if x == nil {
- x = reflect.New(msd.t.Elem()).Interface()
- if err := Unmarshal(ext.enc, x.(Message)); err != nil {
- return nil, err
- }
- }
- d, err := json.Marshal(x)
- if err != nil {
- return nil, err
- }
- b.Write(d)
- }
- b.WriteByte('}')
- return b.Bytes(), nil
-}
-
-// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
-// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
-func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error {
- // Common-case fast path.
- if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
- return nil
- }
-
- // This is fairly tricky, and it's not clear that it is needed.
- return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented")
-}
-
-// A global registry of types that can be used in a MessageSet.
-
-var messageSetMap = make(map[int32]messageSetDesc)
-
-type messageSetDesc struct {
- t reflect.Type // pointer to struct
- name string
-}
-
-// RegisterMessageSetType is called from the generated code.
-func RegisterMessageSetType(m Message, fieldNum int32, name string) {
- messageSetMap[fieldNum] = messageSetDesc{
- t: reflect.TypeOf(m),
- name: name,
- }
-}
diff --git a/vendor/src/github.com/golang/protobuf/proto/message_set_test.go b/vendor/src/github.com/golang/protobuf/proto/message_set_test.go
deleted file mode 100644
index 353a3ea..0000000
--- a/vendor/src/github.com/golang/protobuf/proto/message_set_test.go
+++ /dev/null
@@ -1,66 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2014 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-import (
- "bytes"
- "testing"
-)
-
-func TestUnmarshalMessageSetWithDuplicate(t *testing.T) {
- // Check that a repeated message set entry will be concatenated.
- in := &messageSet{
- Item: []*_MessageSet_Item{
- {TypeId: Int32(12345), Message: []byte("hoo")},
- {TypeId: Int32(12345), Message: []byte("hah")},
- },
- }
- b, err := Marshal(in)
- if err != nil {
- t.Fatalf("Marshal: %v", err)
- }
- t.Logf("Marshaled bytes: %q", b)
-
- var extensions XXX_InternalExtensions
- if err := UnmarshalMessageSet(b, &extensions); err != nil {
- t.Fatalf("UnmarshalMessageSet: %v", err)
- }
- ext, ok := extensions.p.extensionMap[12345]
- if !ok {
- t.Fatalf("Didn't retrieve extension 12345; map is %v", extensions.p.extensionMap)
- }
- // Skip wire type/field number and length varints.
- got := skipVarint(skipVarint(ext.enc))
- if want := []byte("hoohah"); !bytes.Equal(got, want) {
- t.Errorf("Combined extension is %q, want %q", got, want)
- }
-}
diff --git a/vendor/src/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/src/github.com/golang/protobuf/proto/pointer_reflect.go
deleted file mode 100644
index fb512e2..0000000
--- a/vendor/src/github.com/golang/protobuf/proto/pointer_reflect.go
+++ /dev/null
@@ -1,484 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2012 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// +build appengine js
-
-// This file contains an implementation of proto field accesses using package reflect.
-// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
-// be used on App Engine.
-
-package proto
-
-import (
- "math"
- "reflect"
-)
-
-// A structPointer is a pointer to a struct.
-type structPointer struct {
- v reflect.Value
-}
-
-// toStructPointer returns a structPointer equivalent to the given reflect value.
-// The reflect value must itself be a pointer to a struct.
-func toStructPointer(v reflect.Value) structPointer {
- return structPointer{v}
-}
-
-// IsNil reports whether p is nil.
-func structPointer_IsNil(p structPointer) bool {
- return p.v.IsNil()
-}
-
-// Interface returns the struct pointer as an interface value.
-func structPointer_Interface(p structPointer, _ reflect.Type) interface{} {
- return p.v.Interface()
-}
-
-// A field identifies a field in a struct, accessible from a structPointer.
-// In this implementation, a field is identified by the sequence of field indices
-// passed to reflect's FieldByIndex.
-type field []int
-
-// toField returns a field equivalent to the given reflect field.
-func toField(f *reflect.StructField) field {
- return f.Index
-}
-
-// invalidField is an invalid field identifier.
-var invalidField = field(nil)
-
-// IsValid reports whether the field identifier is valid.
-func (f field) IsValid() bool { return f != nil }
-
-// field returns the given field in the struct as a reflect value.
-func structPointer_field(p structPointer, f field) reflect.Value {
- // Special case: an extension map entry with a value of type T
- // passes a *T to the struct-handling code with a zero field,
- // expecting that it will be treated as equivalent to *struct{ X T },
- // which has the same memory layout. We have to handle that case
- // specially, because reflect will panic if we call FieldByIndex on a
- // non-struct.
- if f == nil {
- return p.v.Elem()
- }
-
- return p.v.Elem().FieldByIndex(f)
-}
-
-// ifield returns the given field in the struct as an interface value.
-func structPointer_ifield(p structPointer, f field) interface{} {
- return structPointer_field(p, f).Addr().Interface()
-}
-
-// Bytes returns the address of a []byte field in the struct.
-func structPointer_Bytes(p structPointer, f field) *[]byte {
- return structPointer_ifield(p, f).(*[]byte)
-}
-
-// BytesSlice returns the address of a [][]byte field in the struct.
-func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
- return structPointer_ifield(p, f).(*[][]byte)
-}
-
-// Bool returns the address of a *bool field in the struct.
-func structPointer_Bool(p structPointer, f field) **bool {
- return structPointer_ifield(p, f).(**bool)
-}
-
-// BoolVal returns the address of a bool field in the struct.
-func structPointer_BoolVal(p structPointer, f field) *bool {
- return structPointer_ifield(p, f).(*bool)
-}
-
-// BoolSlice returns the address of a []bool field in the struct.
-func structPointer_BoolSlice(p structPointer, f field) *[]bool {
- return structPointer_ifield(p, f).(*[]bool)
-}
-
-// String returns the address of a *string field in the struct.
-func structPointer_String(p structPointer, f field) **string {
- return structPointer_ifield(p, f).(**string)
-}
-
-// StringVal returns the address of a string field in the struct.
-func structPointer_StringVal(p structPointer, f field) *string {
- return structPointer_ifield(p, f).(*string)
-}
-
-// StringSlice returns the address of a []string field in the struct.
-func structPointer_StringSlice(p structPointer, f field) *[]string {
- return structPointer_ifield(p, f).(*[]string)
-}
-
-// Extensions returns the address of an extension map field in the struct.
-func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions {
- return structPointer_ifield(p, f).(*XXX_InternalExtensions)
-}
-
-// ExtMap returns the address of an extension map field in the struct.
-func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
- return structPointer_ifield(p, f).(*map[int32]Extension)
-}
-
-// NewAt returns the reflect.Value for a pointer to a field in the struct.
-func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
- return structPointer_field(p, f).Addr()
-}
-
-// SetStructPointer writes a *struct field in the struct.
-func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
- structPointer_field(p, f).Set(q.v)
-}
-
-// GetStructPointer reads a *struct field in the struct.
-func structPointer_GetStructPointer(p structPointer, f field) structPointer {
- return structPointer{structPointer_field(p, f)}
-}
-
-// StructPointerSlice the address of a []*struct field in the struct.
-func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice {
- return structPointerSlice{structPointer_field(p, f)}
-}
-
-// A structPointerSlice represents the address of a slice of pointers to structs
-// (themselves messages or groups). That is, v.Type() is *[]*struct{...}.
-type structPointerSlice struct {
- v reflect.Value
-}
-
-func (p structPointerSlice) Len() int { return p.v.Len() }
-func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} }
-func (p structPointerSlice) Append(q structPointer) {
- p.v.Set(reflect.Append(p.v, q.v))
-}
-
-var (
- int32Type = reflect.TypeOf(int32(0))
- uint32Type = reflect.TypeOf(uint32(0))
- float32Type = reflect.TypeOf(float32(0))
- int64Type = reflect.TypeOf(int64(0))
- uint64Type = reflect.TypeOf(uint64(0))
- float64Type = reflect.TypeOf(float64(0))
-)
-
-// A word32 represents a field of type *int32, *uint32, *float32, or *enum.
-// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable.
-type word32 struct {
- v reflect.Value
-}
-
-// IsNil reports whether p is nil.
-func word32_IsNil(p word32) bool {
- return p.v.IsNil()
-}
-
-// Set sets p to point at a newly allocated word with bits set to x.
-func word32_Set(p word32, o *Buffer, x uint32) {
- t := p.v.Type().Elem()
- switch t {
- case int32Type:
- if len(o.int32s) == 0 {
- o.int32s = make([]int32, uint32PoolSize)
- }
- o.int32s[0] = int32(x)
- p.v.Set(reflect.ValueOf(&o.int32s[0]))
- o.int32s = o.int32s[1:]
- return
- case uint32Type:
- if len(o.uint32s) == 0 {
- o.uint32s = make([]uint32, uint32PoolSize)
- }
- o.uint32s[0] = x
- p.v.Set(reflect.ValueOf(&o.uint32s[0]))
- o.uint32s = o.uint32s[1:]
- return
- case float32Type:
- if len(o.float32s) == 0 {
- o.float32s = make([]float32, uint32PoolSize)
- }
- o.float32s[0] = math.Float32frombits(x)
- p.v.Set(reflect.ValueOf(&o.float32s[0]))
- o.float32s = o.float32s[1:]
- return
- }
-
- // must be enum
- p.v.Set(reflect.New(t))
- p.v.Elem().SetInt(int64(int32(x)))
-}
-
-// Get gets the bits pointed at by p, as a uint32.
-func word32_Get(p word32) uint32 {
- elem := p.v.Elem()
- switch elem.Kind() {
- case reflect.Int32:
- return uint32(elem.Int())
- case reflect.Uint32:
- return uint32(elem.Uint())
- case reflect.Float32:
- return math.Float32bits(float32(elem.Float()))
- }
- panic("unreachable")
-}
-
-// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct.
-func structPointer_Word32(p structPointer, f field) word32 {
- return word32{structPointer_field(p, f)}
-}
-
-// A word32Val represents a field of type int32, uint32, float32, or enum.
-// That is, v.Type() is int32, uint32, float32, or enum and v is assignable.
-type word32Val struct {
- v reflect.Value
-}
-
-// Set sets *p to x.
-func word32Val_Set(p word32Val, x uint32) {
- switch p.v.Type() {
- case int32Type:
- p.v.SetInt(int64(x))
- return
- case uint32Type:
- p.v.SetUint(uint64(x))
- return
- case float32Type:
- p.v.SetFloat(float64(math.Float32frombits(x)))
- return
- }
-
- // must be enum
- p.v.SetInt(int64(int32(x)))
-}
-
-// Get gets the bits pointed at by p, as a uint32.
-func word32Val_Get(p word32Val) uint32 {
- elem := p.v
- switch elem.Kind() {
- case reflect.Int32:
- return uint32(elem.Int())
- case reflect.Uint32:
- return uint32(elem.Uint())
- case reflect.Float32:
- return math.Float32bits(float32(elem.Float()))
- }
- panic("unreachable")
-}
-
-// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct.
-func structPointer_Word32Val(p structPointer, f field) word32Val {
- return word32Val{structPointer_field(p, f)}
-}
-
-// A word32Slice is a slice of 32-bit values.
-// That is, v.Type() is []int32, []uint32, []float32, or []enum.
-type word32Slice struct {
- v reflect.Value
-}
-
-func (p word32Slice) Append(x uint32) {
- n, m := p.v.Len(), p.v.Cap()
- if n < m {
- p.v.SetLen(n + 1)
- } else {
- t := p.v.Type().Elem()
- p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
- }
- elem := p.v.Index(n)
- switch elem.Kind() {
- case reflect.Int32:
- elem.SetInt(int64(int32(x)))
- case reflect.Uint32:
- elem.SetUint(uint64(x))
- case reflect.Float32:
- elem.SetFloat(float64(math.Float32frombits(x)))
- }
-}
-
-func (p word32Slice) Len() int {
- return p.v.Len()
-}
-
-func (p word32Slice) Index(i int) uint32 {
- elem := p.v.Index(i)
- switch elem.Kind() {
- case reflect.Int32:
- return uint32(elem.Int())
- case reflect.Uint32:
- return uint32(elem.Uint())
- case reflect.Float32:
- return math.Float32bits(float32(elem.Float()))
- }
- panic("unreachable")
-}
-
-// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct.
-func structPointer_Word32Slice(p structPointer, f field) word32Slice {
- return word32Slice{structPointer_field(p, f)}
-}
-
-// word64 is like word32 but for 64-bit values.
-type word64 struct {
- v reflect.Value
-}
-
-func word64_Set(p word64, o *Buffer, x uint64) {
- t := p.v.Type().Elem()
- switch t {
- case int64Type:
- if len(o.int64s) == 0 {
- o.int64s = make([]int64, uint64PoolSize)
- }
- o.int64s[0] = int64(x)
- p.v.Set(reflect.ValueOf(&o.int64s[0]))
- o.int64s = o.int64s[1:]
- return
- case uint64Type:
- if len(o.uint64s) == 0 {
- o.uint64s = make([]uint64, uint64PoolSize)
- }
- o.uint64s[0] = x
- p.v.Set(reflect.ValueOf(&o.uint64s[0]))
- o.uint64s = o.uint64s[1:]
- return
- case float64Type:
- if len(o.float64s) == 0 {
- o.float64s = make([]float64, uint64PoolSize)
- }
- o.float64s[0] = math.Float64frombits(x)
- p.v.Set(reflect.ValueOf(&o.float64s[0]))
- o.float64s = o.float64s[1:]
- return
- }
- panic("unreachable")
-}
-
-func word64_IsNil(p word64) bool {
- return p.v.IsNil()
-}
-
-func word64_Get(p word64) uint64 {
- elem := p.v.Elem()
- switch elem.Kind() {
- case reflect.Int64:
- return uint64(elem.Int())
- case reflect.Uint64:
- return elem.Uint()
- case reflect.Float64:
- return math.Float64bits(elem.Float())
- }
- panic("unreachable")
-}
-
-func structPointer_Word64(p structPointer, f field) word64 {
- return word64{structPointer_field(p, f)}
-}
-
-// word64Val is like word32Val but for 64-bit values.
-type word64Val struct {
- v reflect.Value
-}
-
-func word64Val_Set(p word64Val, o *Buffer, x uint64) {
- switch p.v.Type() {
- case int64Type:
- p.v.SetInt(int64(x))
- return
- case uint64Type:
- p.v.SetUint(x)
- return
- case float64Type:
- p.v.SetFloat(math.Float64frombits(x))
- return
- }
- panic("unreachable")
-}
-
-func word64Val_Get(p word64Val) uint64 {
- elem := p.v
- switch elem.Kind() {
- case reflect.Int64:
- return uint64(elem.Int())
- case reflect.Uint64:
- return elem.Uint()
- case reflect.Float64:
- return math.Float64bits(elem.Float())
- }
- panic("unreachable")
-}
-
-func structPointer_Word64Val(p structPointer, f field) word64Val {
- return word64Val{structPointer_field(p, f)}
-}
-
-type word64Slice struct {
- v reflect.Value
-}
-
-func (p word64Slice) Append(x uint64) {
- n, m := p.v.Len(), p.v.Cap()
- if n < m {
- p.v.SetLen(n + 1)
- } else {
- t := p.v.Type().Elem()
- p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
- }
- elem := p.v.Index(n)
- switch elem.Kind() {
- case reflect.Int64:
- elem.SetInt(int64(int64(x)))
- case reflect.Uint64:
- elem.SetUint(uint64(x))
- case reflect.Float64:
- elem.SetFloat(float64(math.Float64frombits(x)))
- }
-}
-
-func (p word64Slice) Len() int {
- return p.v.Len()
-}
-
-func (p word64Slice) Index(i int) uint64 {
- elem := p.v.Index(i)
- switch elem.Kind() {
- case reflect.Int64:
- return uint64(elem.Int())
- case reflect.Uint64:
- return uint64(elem.Uint())
- case reflect.Float64:
- return math.Float64bits(float64(elem.Float()))
- }
- panic("unreachable")
-}
-
-func structPointer_Word64Slice(p structPointer, f field) word64Slice {
- return word64Slice{structPointer_field(p, f)}
-}
diff --git a/vendor/src/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/src/github.com/golang/protobuf/proto/pointer_unsafe.go
deleted file mode 100644
index 6b5567d..0000000
--- a/vendor/src/github.com/golang/protobuf/proto/pointer_unsafe.go
+++ /dev/null
@@ -1,270 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2012 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// +build !appengine,!js
-
-// This file contains the implementation of the proto field accesses using package unsafe.
-
-package proto
-
-import (
- "reflect"
- "unsafe"
-)
-
-// NOTE: These type_Foo functions would more idiomatically be methods,
-// but Go does not allow methods on pointer types, and we must preserve
-// some pointer type for the garbage collector. We use these
-// funcs with clunky names as our poor approximation to methods.
-//
-// An alternative would be
-// type structPointer struct { p unsafe.Pointer }
-// but that does not registerize as well.
-
-// A structPointer is a pointer to a struct.
-type structPointer unsafe.Pointer
-
-// toStructPointer returns a structPointer equivalent to the given reflect value.
-func toStructPointer(v reflect.Value) structPointer {
- return structPointer(unsafe.Pointer(v.Pointer()))
-}
-
-// IsNil reports whether p is nil.
-func structPointer_IsNil(p structPointer) bool {
- return p == nil
-}
-
-// Interface returns the struct pointer, assumed to have element type t,
-// as an interface value.
-func structPointer_Interface(p structPointer, t reflect.Type) interface{} {
- return reflect.NewAt(t, unsafe.Pointer(p)).Interface()
-}
-
-// A field identifies a field in a struct, accessible from a structPointer.
-// In this implementation, a field is identified by its byte offset from the start of the struct.
-type field uintptr
-
-// toField returns a field equivalent to the given reflect field.
-func toField(f *reflect.StructField) field {
- return field(f.Offset)
-}
-
-// invalidField is an invalid field identifier.
-const invalidField = ^field(0)
-
-// IsValid reports whether the field identifier is valid.
-func (f field) IsValid() bool {
- return f != ^field(0)
-}
-
-// Bytes returns the address of a []byte field in the struct.
-func structPointer_Bytes(p structPointer, f field) *[]byte {
- return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
-}
-
-// BytesSlice returns the address of a [][]byte field in the struct.
-func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
- return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
-}
-
-// Bool returns the address of a *bool field in the struct.
-func structPointer_Bool(p structPointer, f field) **bool {
- return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
-}
-
-// BoolVal returns the address of a bool field in the struct.
-func structPointer_BoolVal(p structPointer, f field) *bool {
- return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
-}
-
-// BoolSlice returns the address of a []bool field in the struct.
-func structPointer_BoolSlice(p structPointer, f field) *[]bool {
- return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
-}
-
-// String returns the address of a *string field in the struct.
-func structPointer_String(p structPointer, f field) **string {
- return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
-}
-
-// StringVal returns the address of a string field in the struct.
-func structPointer_StringVal(p structPointer, f field) *string {
- return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
-}
-
-// StringSlice returns the address of a []string field in the struct.
-func structPointer_StringSlice(p structPointer, f field) *[]string {
- return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
-}
-
-// ExtMap returns the address of an extension map field in the struct.
-func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions {
- return (*XXX_InternalExtensions)(unsafe.Pointer(uintptr(p) + uintptr(f)))
-}
-
-func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
- return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f)))
-}
-
-// NewAt returns the reflect.Value for a pointer to a field in the struct.
-func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
- return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f)))
-}
-
-// SetStructPointer writes a *struct field in the struct.
-func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
- *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q
-}
-
-// GetStructPointer reads a *struct field in the struct.
-func structPointer_GetStructPointer(p structPointer, f field) structPointer {
- return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f)))
-}
-
-// StructPointerSlice the address of a []*struct field in the struct.
-func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice {
- return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
-}
-
-// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups).
-type structPointerSlice []structPointer
-
-func (v *structPointerSlice) Len() int { return len(*v) }
-func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] }
-func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) }
-
-// A word32 is the address of a "pointer to 32-bit value" field.
-type word32 **uint32
-
-// IsNil reports whether *v is nil.
-func word32_IsNil(p word32) bool {
- return *p == nil
-}
-
-// Set sets *v to point at a newly allocated word set to x.
-func word32_Set(p word32, o *Buffer, x uint32) {
- if len(o.uint32s) == 0 {
- o.uint32s = make([]uint32, uint32PoolSize)
- }
- o.uint32s[0] = x
- *p = &o.uint32s[0]
- o.uint32s = o.uint32s[1:]
-}
-
-// Get gets the value pointed at by *v.
-func word32_Get(p word32) uint32 {
- return **p
-}
-
-// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
-func structPointer_Word32(p structPointer, f field) word32 {
- return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
-}
-
-// A word32Val is the address of a 32-bit value field.
-type word32Val *uint32
-
-// Set sets *p to x.
-func word32Val_Set(p word32Val, x uint32) {
- *p = x
-}
-
-// Get gets the value pointed at by p.
-func word32Val_Get(p word32Val) uint32 {
- return *p
-}
-
-// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
-func structPointer_Word32Val(p structPointer, f field) word32Val {
- return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
-}
-
-// A word32Slice is a slice of 32-bit values.
-type word32Slice []uint32
-
-func (v *word32Slice) Append(x uint32) { *v = append(*v, x) }
-func (v *word32Slice) Len() int { return len(*v) }
-func (v *word32Slice) Index(i int) uint32 { return (*v)[i] }
-
-// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct.
-func structPointer_Word32Slice(p structPointer, f field) *word32Slice {
- return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
-}
-
-// word64 is like word32 but for 64-bit values.
-type word64 **uint64
-
-func word64_Set(p word64, o *Buffer, x uint64) {
- if len(o.uint64s) == 0 {
- o.uint64s = make([]uint64, uint64PoolSize)
- }
- o.uint64s[0] = x
- *p = &o.uint64s[0]
- o.uint64s = o.uint64s[1:]
-}
-
-func word64_IsNil(p word64) bool {
- return *p == nil
-}
-
-func word64_Get(p word64) uint64 {
- return **p
-}
-
-func structPointer_Word64(p structPointer, f field) word64 {
- return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
-}
-
-// word64Val is like word32Val but for 64-bit values.
-type word64Val *uint64
-
-func word64Val_Set(p word64Val, o *Buffer, x uint64) {
- *p = x
-}
-
-func word64Val_Get(p word64Val) uint64 {
- return *p
-}
-
-func structPointer_Word64Val(p structPointer, f field) word64Val {
- return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
-}
-
-// word64Slice is like word32Slice but for 64-bit values.
-type word64Slice []uint64
-
-func (v *word64Slice) Append(x uint64) { *v = append(*v, x) }
-func (v *word64Slice) Len() int { return len(*v) }
-func (v *word64Slice) Index(i int) uint64 { return (*v)[i] }
-
-func structPointer_Word64Slice(p structPointer, f field) *word64Slice {
- return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
-}
diff --git a/vendor/src/github.com/golang/protobuf/proto/properties.go b/vendor/src/github.com/golang/protobuf/proto/properties.go
deleted file mode 100644
index ec2289c..0000000
--- a/vendor/src/github.com/golang/protobuf/proto/properties.go
+++ /dev/null
@@ -1,872 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-/*
- * Routines for encoding data into the wire format for protocol buffers.
- */
-
-import (
- "fmt"
- "log"
- "os"
- "reflect"
- "sort"
- "strconv"
- "strings"
- "sync"
-)
-
-const debug bool = false
-
-// Constants that identify the encoding of a value on the wire.
-const (
- WireVarint = 0
- WireFixed64 = 1
- WireBytes = 2
- WireStartGroup = 3
- WireEndGroup = 4
- WireFixed32 = 5
-)
-
-const startSize = 10 // initial slice/string sizes
-
-// Encoders are defined in encode.go
-// An encoder outputs the full representation of a field, including its
-// tag and encoder type.
-type encoder func(p *Buffer, prop *Properties, base structPointer) error
-
-// A valueEncoder encodes a single integer in a particular encoding.
-type valueEncoder func(o *Buffer, x uint64) error
-
-// Sizers are defined in encode.go
-// A sizer returns the encoded size of a field, including its tag and encoder
-// type.
-type sizer func(prop *Properties, base structPointer) int
-
-// A valueSizer returns the encoded size of a single integer in a particular
-// encoding.
-type valueSizer func(x uint64) int
-
-// Decoders are defined in decode.go
-// A decoder creates a value from its wire representation.
-// Unrecognized subelements are saved in unrec.
-type decoder func(p *Buffer, prop *Properties, base structPointer) error
-
-// A valueDecoder decodes a single integer in a particular encoding.
-type valueDecoder func(o *Buffer) (x uint64, err error)
-
-// A oneofMarshaler does the marshaling for all oneof fields in a message.
-type oneofMarshaler func(Message, *Buffer) error
-
-// A oneofUnmarshaler does the unmarshaling for a oneof field in a message.
-type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error)
-
-// A oneofSizer does the sizing for all oneof fields in a message.
-type oneofSizer func(Message) int
-
-// tagMap is an optimization over map[int]int for typical protocol buffer
-// use-cases. Encoded protocol buffers are often in tag order with small tag
-// numbers.
-type tagMap struct {
- fastTags []int
- slowTags map[int]int
-}
-
-// tagMapFastLimit is the upper bound on the tag number that will be stored in
-// the tagMap slice rather than its map.
-const tagMapFastLimit = 1024
-
-func (p *tagMap) get(t int) (int, bool) {
- if t > 0 && t < tagMapFastLimit {
- if t >= len(p.fastTags) {
- return 0, false
- }
- fi := p.fastTags[t]
- return fi, fi >= 0
- }
- fi, ok := p.slowTags[t]
- return fi, ok
-}
-
-func (p *tagMap) put(t int, fi int) {
- if t > 0 && t < tagMapFastLimit {
- for len(p.fastTags) < t+1 {
- p.fastTags = append(p.fastTags, -1)
- }
- p.fastTags[t] = fi
- return
- }
- if p.slowTags == nil {
- p.slowTags = make(map[int]int)
- }
- p.slowTags[t] = fi
-}
-
-// StructProperties represents properties for all the fields of a struct.
-// decoderTags and decoderOrigNames should only be used by the decoder.
-type StructProperties struct {
- Prop []*Properties // properties for each field
- reqCount int // required count
- decoderTags tagMap // map from proto tag to struct field number
- decoderOrigNames map[string]int // map from original name to struct field number
- order []int // list of struct field numbers in tag order
- unrecField field // field id of the XXX_unrecognized []byte field
- extendable bool // is this an extendable proto
-
- oneofMarshaler oneofMarshaler
- oneofUnmarshaler oneofUnmarshaler
- oneofSizer oneofSizer
- stype reflect.Type
-
- // OneofTypes contains information about the oneof fields in this message.
- // It is keyed by the original name of a field.
- OneofTypes map[string]*OneofProperties
-}
-
-// OneofProperties represents information about a specific field in a oneof.
-type OneofProperties struct {
- Type reflect.Type // pointer to generated struct type for this oneof field
- Field int // struct field number of the containing oneof in the message
- Prop *Properties
-}
-
-// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
-// See encode.go, (*Buffer).enc_struct.
-
-func (sp *StructProperties) Len() int { return len(sp.order) }
-func (sp *StructProperties) Less(i, j int) bool {
- return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
-}
-func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
-
-// Properties represents the protocol-specific behavior of a single struct field.
-type Properties struct {
- Name string // name of the field, for error messages
- OrigName string // original name before protocol compiler (always set)
- JSONName string // name to use for JSON; determined by protoc
- Wire string
- WireType int
- Tag int
- Required bool
- Optional bool
- Repeated bool
- Packed bool // relevant for repeated primitives only
- Enum string // set for enum types only
- proto3 bool // whether this is known to be a proto3 field; set for []byte only
- oneof bool // whether this is a oneof field
-
- Default string // default value
- HasDefault bool // whether an explicit default was provided
- def_uint64 uint64
-
- enc encoder
- valEnc valueEncoder // set for bool and numeric types only
- field field
- tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType)
- tagbuf [8]byte
- stype reflect.Type // set for struct types only
- sprop *StructProperties // set for struct types only
- isMarshaler bool
- isUnmarshaler bool
-
- mtype reflect.Type // set for map types only
- mkeyprop *Properties // set for map types only
- mvalprop *Properties // set for map types only
-
- size sizer
- valSize valueSizer // set for bool and numeric types only
-
- dec decoder
- valDec valueDecoder // set for bool and numeric types only
-
- // If this is a packable field, this will be the decoder for the packed version of the field.
- packedDec decoder
-}
-
-// String formats the properties in the protobuf struct field tag style.
-func (p *Properties) String() string {
- s := p.Wire
- s = ","
- s += strconv.Itoa(p.Tag)
- if p.Required {
- s += ",req"
- }
- if p.Optional {
- s += ",opt"
- }
- if p.Repeated {
- s += ",rep"
- }
- if p.Packed {
- s += ",packed"
- }
- s += ",name=" + p.OrigName
- if p.JSONName != p.OrigName {
- s += ",json=" + p.JSONName
- }
- if p.proto3 {
- s += ",proto3"
- }
- if p.oneof {
- s += ",oneof"
- }
- if len(p.Enum) > 0 {
- s += ",enum=" + p.Enum
- }
- if p.HasDefault {
- s += ",def=" + p.Default
- }
- return s
-}
-
-// Parse populates p by parsing a string in the protobuf struct field tag style.
-func (p *Properties) Parse(s string) {
- // "bytes,49,opt,name=foo,def=hello!"
- fields := strings.Split(s, ",") // breaks def=, but handled below.
- if len(fields) < 2 {
- fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s)
- return
- }
-
- p.Wire = fields[0]
- switch p.Wire {
- case "varint":
- p.WireType = WireVarint
- p.valEnc = (*Buffer).EncodeVarint
- p.valDec = (*Buffer).DecodeVarint
- p.valSize = sizeVarint
- case "fixed32":
- p.WireType = WireFixed32
- p.valEnc = (*Buffer).EncodeFixed32
- p.valDec = (*Buffer).DecodeFixed32
- p.valSize = sizeFixed32
- case "fixed64":
- p.WireType = WireFixed64
- p.valEnc = (*Buffer).EncodeFixed64
- p.valDec = (*Buffer).DecodeFixed64
- p.valSize = sizeFixed64
- case "zigzag32":
- p.WireType = WireVarint
- p.valEnc = (*Buffer).EncodeZigzag32
- p.valDec = (*Buffer).DecodeZigzag32
- p.valSize = sizeZigzag32
- case "zigzag64":
- p.WireType = WireVarint
- p.valEnc = (*Buffer).EncodeZigzag64
- p.valDec = (*Buffer).DecodeZigzag64
- p.valSize = sizeZigzag64
- case "bytes", "group":
- p.WireType = WireBytes
- // no numeric converter for non-numeric types
- default:
- fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s)
- return
- }
-
- var err error
- p.Tag, err = strconv.Atoi(fields[1])
- if err != nil {
- return
- }
-
- for i := 2; i < len(fields); i++ {
- f := fields[i]
- switch {
- case f == "req":
- p.Required = true
- case f == "opt":
- p.Optional = true
- case f == "rep":
- p.Repeated = true
- case f == "packed":
- p.Packed = true
- case strings.HasPrefix(f, "name="):
- p.OrigName = f[5:]
- case strings.HasPrefix(f, "json="):
- p.JSONName = f[5:]
- case strings.HasPrefix(f, "enum="):
- p.Enum = f[5:]
- case f == "proto3":
- p.proto3 = true
- case f == "oneof":
- p.oneof = true
- case strings.HasPrefix(f, "def="):
- p.HasDefault = true
- p.Default = f[4:] // rest of string
- if i+1 < len(fields) {
- // Commas aren't escaped, and def is always last.
- p.Default += "," + strings.Join(fields[i+1:], ",")
- break
- }
- }
- }
-}
-
-func logNoSliceEnc(t1, t2 reflect.Type) {
- fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2)
-}
-
-var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
-
-// Initialize the fields for encoding and decoding.
-func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
- p.enc = nil
- p.dec = nil
- p.size = nil
-
- switch t1 := typ; t1.Kind() {
- default:
- fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1)
-
- // proto3 scalar types
-
- case reflect.Bool:
- p.enc = (*Buffer).enc_proto3_bool
- p.dec = (*Buffer).dec_proto3_bool
- p.size = size_proto3_bool
- case reflect.Int32:
- p.enc = (*Buffer).enc_proto3_int32
- p.dec = (*Buffer).dec_proto3_int32
- p.size = size_proto3_int32
- case reflect.Uint32:
- p.enc = (*Buffer).enc_proto3_uint32
- p.dec = (*Buffer).dec_proto3_int32 // can reuse
- p.size = size_proto3_uint32
- case reflect.Int64, reflect.Uint64:
- p.enc = (*Buffer).enc_proto3_int64
- p.dec = (*Buffer).dec_proto3_int64
- p.size = size_proto3_int64
- case reflect.Float32:
- p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits
- p.dec = (*Buffer).dec_proto3_int32
- p.size = size_proto3_uint32
- case reflect.Float64:
- p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits
- p.dec = (*Buffer).dec_proto3_int64
- p.size = size_proto3_int64
- case reflect.String:
- p.enc = (*Buffer).enc_proto3_string
- p.dec = (*Buffer).dec_proto3_string
- p.size = size_proto3_string
-
- case reflect.Ptr:
- switch t2 := t1.Elem(); t2.Kind() {
- default:
- fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2)
- break
- case reflect.Bool:
- p.enc = (*Buffer).enc_bool
- p.dec = (*Buffer).dec_bool
- p.size = size_bool
- case reflect.Int32:
- p.enc = (*Buffer).enc_int32
- p.dec = (*Buffer).dec_int32
- p.size = size_int32
- case reflect.Uint32:
- p.enc = (*Buffer).enc_uint32
- p.dec = (*Buffer).dec_int32 // can reuse
- p.size = size_uint32
- case reflect.Int64, reflect.Uint64:
- p.enc = (*Buffer).enc_int64
- p.dec = (*Buffer).dec_int64
- p.size = size_int64
- case reflect.Float32:
- p.enc = (*Buffer).enc_uint32 // can just treat them as bits
- p.dec = (*Buffer).dec_int32
- p.size = size_uint32
- case reflect.Float64:
- p.enc = (*Buffer).enc_int64 // can just treat them as bits
- p.dec = (*Buffer).dec_int64
- p.size = size_int64
- case reflect.String:
- p.enc = (*Buffer).enc_string
- p.dec = (*Buffer).dec_string
- p.size = size_string
- case reflect.Struct:
- p.stype = t1.Elem()
- p.isMarshaler = isMarshaler(t1)
- p.isUnmarshaler = isUnmarshaler(t1)
- if p.Wire == "bytes" {
- p.enc = (*Buffer).enc_struct_message
- p.dec = (*Buffer).dec_struct_message
- p.size = size_struct_message
- } else {
- p.enc = (*Buffer).enc_struct_group
- p.dec = (*Buffer).dec_struct_group
- p.size = size_struct_group
- }
- }
-
- case reflect.Slice:
- switch t2 := t1.Elem(); t2.Kind() {
- default:
- logNoSliceEnc(t1, t2)
- break
- case reflect.Bool:
- if p.Packed {
- p.enc = (*Buffer).enc_slice_packed_bool
- p.size = size_slice_packed_bool
- } else {
- p.enc = (*Buffer).enc_slice_bool
- p.size = size_slice_bool
- }
- p.dec = (*Buffer).dec_slice_bool
- p.packedDec = (*Buffer).dec_slice_packed_bool
- case reflect.Int32:
- if p.Packed {
- p.enc = (*Buffer).enc_slice_packed_int32
- p.size = size_slice_packed_int32
- } else {
- p.enc = (*Buffer).enc_slice_int32
- p.size = size_slice_int32
- }
- p.dec = (*Buffer).dec_slice_int32
- p.packedDec = (*Buffer).dec_slice_packed_int32
- case reflect.Uint32:
- if p.Packed {
- p.enc = (*Buffer).enc_slice_packed_uint32
- p.size = size_slice_packed_uint32
- } else {
- p.enc = (*Buffer).enc_slice_uint32
- p.size = size_slice_uint32
- }
- p.dec = (*Buffer).dec_slice_int32
- p.packedDec = (*Buffer).dec_slice_packed_int32
- case reflect.Int64, reflect.Uint64:
- if p.Packed {
- p.enc = (*Buffer).enc_slice_packed_int64
- p.size = size_slice_packed_int64
- } else {
- p.enc = (*Buffer).enc_slice_int64
- p.size = size_slice_int64
- }
- p.dec = (*Buffer).dec_slice_int64
- p.packedDec = (*Buffer).dec_slice_packed_int64
- case reflect.Uint8:
- p.dec = (*Buffer).dec_slice_byte
- if p.proto3 {
- p.enc = (*Buffer).enc_proto3_slice_byte
- p.size = size_proto3_slice_byte
- } else {
- p.enc = (*Buffer).enc_slice_byte
- p.size = size_slice_byte
- }
- case reflect.Float32, reflect.Float64:
- switch t2.Bits() {
- case 32:
- // can just treat them as bits
- if p.Packed {
- p.enc = (*Buffer).enc_slice_packed_uint32
- p.size = size_slice_packed_uint32
- } else {
- p.enc = (*Buffer).enc_slice_uint32
- p.size = size_slice_uint32
- }
- p.dec = (*Buffer).dec_slice_int32
- p.packedDec = (*Buffer).dec_slice_packed_int32
- case 64:
- // can just treat them as bits
- if p.Packed {
- p.enc = (*Buffer).enc_slice_packed_int64
- p.size = size_slice_packed_int64
- } else {
- p.enc = (*Buffer).enc_slice_int64
- p.size = size_slice_int64
- }
- p.dec = (*Buffer).dec_slice_int64
- p.packedDec = (*Buffer).dec_slice_packed_int64
- default:
- logNoSliceEnc(t1, t2)
- break
- }
- case reflect.String:
- p.enc = (*Buffer).enc_slice_string
- p.dec = (*Buffer).dec_slice_string
- p.size = size_slice_string
- case reflect.Ptr:
- switch t3 := t2.Elem(); t3.Kind() {
- default:
- fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3)
- break
- case reflect.Struct:
- p.stype = t2.Elem()
- p.isMarshaler = isMarshaler(t2)
- p.isUnmarshaler = isUnmarshaler(t2)
- if p.Wire == "bytes" {
- p.enc = (*Buffer).enc_slice_struct_message
- p.dec = (*Buffer).dec_slice_struct_message
- p.size = size_slice_struct_message
- } else {
- p.enc = (*Buffer).enc_slice_struct_group
- p.dec = (*Buffer).dec_slice_struct_group
- p.size = size_slice_struct_group
- }
- }
- case reflect.Slice:
- switch t2.Elem().Kind() {
- default:
- fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem())
- break
- case reflect.Uint8:
- p.enc = (*Buffer).enc_slice_slice_byte
- p.dec = (*Buffer).dec_slice_slice_byte
- p.size = size_slice_slice_byte
- }
- }
-
- case reflect.Map:
- p.enc = (*Buffer).enc_new_map
- p.dec = (*Buffer).dec_new_map
- p.size = size_new_map
-
- p.mtype = t1
- p.mkeyprop = &Properties{}
- p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
- p.mvalprop = &Properties{}
- vtype := p.mtype.Elem()
- if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
- // The value type is not a message (*T) or bytes ([]byte),
- // so we need encoders for the pointer to this type.
- vtype = reflect.PtrTo(vtype)
- }
- p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
- }
-
- // precalculate tag code
- wire := p.WireType
- if p.Packed {
- wire = WireBytes
- }
- x := uint32(p.Tag)<<3 | uint32(wire)
- i := 0
- for i = 0; x > 127; i++ {
- p.tagbuf[i] = 0x80 | uint8(x&0x7F)
- x >>= 7
- }
- p.tagbuf[i] = uint8(x)
- p.tagcode = p.tagbuf[0 : i+1]
-
- if p.stype != nil {
- if lockGetProp {
- p.sprop = GetProperties(p.stype)
- } else {
- p.sprop = getPropertiesLocked(p.stype)
- }
- }
-}
-
-var (
- marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
- unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
-)
-
-// isMarshaler reports whether type t implements Marshaler.
-func isMarshaler(t reflect.Type) bool {
- // We're checking for (likely) pointer-receiver methods
- // so if t is not a pointer, something is very wrong.
- // The calls above only invoke isMarshaler on pointer types.
- if t.Kind() != reflect.Ptr {
- panic("proto: misuse of isMarshaler")
- }
- return t.Implements(marshalerType)
-}
-
-// isUnmarshaler reports whether type t implements Unmarshaler.
-func isUnmarshaler(t reflect.Type) bool {
- // We're checking for (likely) pointer-receiver methods
- // so if t is not a pointer, something is very wrong.
- // The calls above only invoke isUnmarshaler on pointer types.
- if t.Kind() != reflect.Ptr {
- panic("proto: misuse of isUnmarshaler")
- }
- return t.Implements(unmarshalerType)
-}
-
-// Init populates the properties from a protocol buffer struct tag.
-func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
- p.init(typ, name, tag, f, true)
-}
-
-func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {
- // "bytes,49,opt,def=hello!"
- p.Name = name
- p.OrigName = name
- if f != nil {
- p.field = toField(f)
- }
- if tag == "" {
- return
- }
- p.Parse(tag)
- p.setEncAndDec(typ, f, lockGetProp)
-}
-
-var (
- propertiesMu sync.RWMutex
- propertiesMap = make(map[reflect.Type]*StructProperties)
-)
-
-// GetProperties returns the list of properties for the type represented by t.
-// t must represent a generated struct type of a protocol message.
-func GetProperties(t reflect.Type) *StructProperties {
- if t.Kind() != reflect.Struct {
- panic("proto: type must have kind struct")
- }
-
- // Most calls to GetProperties in a long-running program will be
- // retrieving details for types we have seen before.
- propertiesMu.RLock()
- sprop, ok := propertiesMap[t]
- propertiesMu.RUnlock()
- if ok {
- if collectStats {
- stats.Chit++
- }
- return sprop
- }
-
- propertiesMu.Lock()
- sprop = getPropertiesLocked(t)
- propertiesMu.Unlock()
- return sprop
-}
-
-// getPropertiesLocked requires that propertiesMu is held.
-func getPropertiesLocked(t reflect.Type) *StructProperties {
- if prop, ok := propertiesMap[t]; ok {
- if collectStats {
- stats.Chit++
- }
- return prop
- }
- if collectStats {
- stats.Cmiss++
- }
-
- prop := new(StructProperties)
- // in case of recursive protos, fill this in now.
- propertiesMap[t] = prop
-
- // build properties
- prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) ||
- reflect.PtrTo(t).Implements(extendableProtoV1Type)
- prop.unrecField = invalidField
- prop.Prop = make([]*Properties, t.NumField())
- prop.order = make([]int, t.NumField())
-
- for i := 0; i < t.NumField(); i++ {
- f := t.Field(i)
- p := new(Properties)
- name := f.Name
- p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
-
- if f.Name == "XXX_InternalExtensions" { // special case
- p.enc = (*Buffer).enc_exts
- p.dec = nil // not needed
- p.size = size_exts
- } else if f.Name == "XXX_extensions" { // special case
- p.enc = (*Buffer).enc_map
- p.dec = nil // not needed
- p.size = size_map
- } else if f.Name == "XXX_unrecognized" { // special case
- prop.unrecField = toField(&f)
- }
- oneof := f.Tag.Get("protobuf_oneof") // special case
- if oneof != "" {
- // Oneof fields don't use the traditional protobuf tag.
- p.OrigName = oneof
- }
- prop.Prop[i] = p
- prop.order[i] = i
- if debug {
- print(i, " ", f.Name, " ", t.String(), " ")
- if p.Tag > 0 {
- print(p.String())
- }
- print("\n")
- }
- if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && oneof == "" {
- fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]")
- }
- }
-
- // Re-order prop.order.
- sort.Sort(prop)
-
- type oneofMessage interface {
- XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
- }
- if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
- var oots []interface{}
- prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs()
- prop.stype = t
-
- // Interpret oneof metadata.
- prop.OneofTypes = make(map[string]*OneofProperties)
- for _, oot := range oots {
- oop := &OneofProperties{
- Type: reflect.ValueOf(oot).Type(), // *T
- Prop: new(Properties),
- }
- sft := oop.Type.Elem().Field(0)
- oop.Prop.Name = sft.Name
- oop.Prop.Parse(sft.Tag.Get("protobuf"))
- // There will be exactly one interface field that
- // this new value is assignable to.
- for i := 0; i < t.NumField(); i++ {
- f := t.Field(i)
- if f.Type.Kind() != reflect.Interface {
- continue
- }
- if !oop.Type.AssignableTo(f.Type) {
- continue
- }
- oop.Field = i
- break
- }
- prop.OneofTypes[oop.Prop.OrigName] = oop
- }
- }
-
- // build required counts
- // build tags
- reqCount := 0
- prop.decoderOrigNames = make(map[string]int)
- for i, p := range prop.Prop {
- if strings.HasPrefix(p.Name, "XXX_") {
- // Internal fields should not appear in tags/origNames maps.
- // They are handled specially when encoding and decoding.
- continue
- }
- if p.Required {
- reqCount++
- }
- prop.decoderTags.put(p.Tag, i)
- prop.decoderOrigNames[p.OrigName] = i
- }
- prop.reqCount = reqCount
-
- return prop
-}
-
-// Return the Properties object for the x[0]'th field of the structure.
-func propByIndex(t reflect.Type, x []int) *Properties {
- if len(x) != 1 {
- fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t)
- return nil
- }
- prop := GetProperties(t)
- return prop.Prop[x[0]]
-}
-
-// Get the address and type of a pointer to a struct from an interface.
-func getbase(pb Message) (t reflect.Type, b structPointer, err error) {
- if pb == nil {
- err = ErrNil
- return
- }
- // get the reflect type of the pointer to the struct.
- t = reflect.TypeOf(pb)
- // get the address of the struct.
- value := reflect.ValueOf(pb)
- b = toStructPointer(value)
- return
-}
-
-// A global registry of enum types.
-// The generated code will register the generated maps by calling RegisterEnum.
-
-var enumValueMaps = make(map[string]map[string]int32)
-
-// RegisterEnum is called from the generated code to install the enum descriptor
-// maps into the global table to aid parsing text format protocol buffers.
-func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
- if _, ok := enumValueMaps[typeName]; ok {
- panic("proto: duplicate enum registered: " + typeName)
- }
- enumValueMaps[typeName] = valueMap
-}
-
-// EnumValueMap returns the mapping from names to integers of the
-// enum type enumType, or a nil if not found.
-func EnumValueMap(enumType string) map[string]int32 {
- return enumValueMaps[enumType]
-}
-
-// A registry of all linked message types.
-// The string is a fully-qualified proto name ("pkg.Message").
-var (
- protoTypes = make(map[string]reflect.Type)
- revProtoTypes = make(map[reflect.Type]string)
-)
-
-// RegisterType is called from generated code and maps from the fully qualified
-// proto name to the type (pointer to struct) of the protocol buffer.
-func RegisterType(x Message, name string) {
- if _, ok := protoTypes[name]; ok {
- // TODO: Some day, make this a panic.
- log.Printf("proto: duplicate proto type registered: %s", name)
- return
- }
- t := reflect.TypeOf(x)
- protoTypes[name] = t
- revProtoTypes[t] = name
-}
-
-// MessageName returns the fully-qualified proto name for the given message type.
-func MessageName(x Message) string {
- type xname interface {
- XXX_MessageName() string
- }
- if m, ok := x.(xname); ok {
- return m.XXX_MessageName()
- }
- return revProtoTypes[reflect.TypeOf(x)]
-}
-
-// MessageType returns the message type (pointer to struct) for a named message.
-func MessageType(name string) reflect.Type { return protoTypes[name] }
-
-// A registry of all linked proto files.
-var (
- protoFiles = make(map[string][]byte) // file name => fileDescriptor
-)
-
-// RegisterFile is called from generated code and maps from the
-// full file name of a .proto file to its compressed FileDescriptorProto.
-func RegisterFile(filename string, fileDescriptor []byte) {
- protoFiles[filename] = fileDescriptor
-}
-
-// FileDescriptor returns the compressed FileDescriptorProto for a .proto file.
-func FileDescriptor(filename string) []byte { return protoFiles[filename] }
diff --git a/vendor/src/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go b/vendor/src/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go
deleted file mode 100644
index 19383ef..0000000
--- a/vendor/src/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go
+++ /dev/null
@@ -1,219 +0,0 @@
-// Code generated by protoc-gen-go.
-// source: proto3_proto/proto3.proto
-// DO NOT EDIT!
-
-/*
-Package proto3_proto is a generated protocol buffer package.
-
-It is generated from these files:
- proto3_proto/proto3.proto
-
-It has these top-level messages:
- Message
- Nested
- MessageWithMap
-*/
-package proto3_proto
-
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-import google_protobuf "github.com/golang/protobuf/ptypes/any"
-import testdata "github.com/golang/protobuf/proto/testdata"
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-const _ = proto.ProtoPackageIsVersion1
-
-type Message_Humour int32
-
-const (
- Message_UNKNOWN Message_Humour = 0
- Message_PUNS Message_Humour = 1
- Message_SLAPSTICK Message_Humour = 2
- Message_BILL_BAILEY Message_Humour = 3
-)
-
-var Message_Humour_name = map[int32]string{
- 0: "UNKNOWN",
- 1: "PUNS",
- 2: "SLAPSTICK",
- 3: "BILL_BAILEY",
-}
-var Message_Humour_value = map[string]int32{
- "UNKNOWN": 0,
- "PUNS": 1,
- "SLAPSTICK": 2,
- "BILL_BAILEY": 3,
-}
-
-func (x Message_Humour) String() string {
- return proto.EnumName(Message_Humour_name, int32(x))
-}
-func (Message_Humour) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0} }
-
-type Message struct {
- Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
- Hilarity Message_Humour `protobuf:"varint,2,opt,name=hilarity,enum=proto3_proto.Message_Humour" json:"hilarity,omitempty"`
- HeightInCm uint32 `protobuf:"varint,3,opt,name=height_in_cm,json=heightInCm" json:"height_in_cm,omitempty"`
- Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"`
- ResultCount int64 `protobuf:"varint,7,opt,name=result_count,json=resultCount" json:"result_count,omitempty"`
- TrueScotsman bool `protobuf:"varint,8,opt,name=true_scotsman,json=trueScotsman" json:"true_scotsman,omitempty"`
- Score float32 `protobuf:"fixed32,9,opt,name=score" json:"score,omitempty"`
- Key []uint64 `protobuf:"varint,5,rep,name=key" json:"key,omitempty"`
- ShortKey []int32 `protobuf:"varint,19,rep,name=short_key,json=shortKey" json:"short_key,omitempty"`
- Nested *Nested `protobuf:"bytes,6,opt,name=nested" json:"nested,omitempty"`
- RFunny []Message_Humour `protobuf:"varint,16,rep,name=r_funny,json=rFunny,enum=proto3_proto.Message_Humour" json:"r_funny,omitempty"`
- Terrain map[string]*Nested `protobuf:"bytes,10,rep,name=terrain" json:"terrain,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
- Proto2Field *testdata.SubDefaults `protobuf:"bytes,11,opt,name=proto2_field,json=proto2Field" json:"proto2_field,omitempty"`
- Proto2Value map[string]*testdata.SubDefaults `protobuf:"bytes,13,rep,name=proto2_value,json=proto2Value" json:"proto2_value,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
- Anything *google_protobuf.Any `protobuf:"bytes,14,opt,name=anything" json:"anything,omitempty"`
- ManyThings []*google_protobuf.Any `protobuf:"bytes,15,rep,name=many_things,json=manyThings" json:"many_things,omitempty"`
- Submessage *Message `protobuf:"bytes,17,opt,name=submessage" json:"submessage,omitempty"`
- Children []*Message `protobuf:"bytes,18,rep,name=children" json:"children,omitempty"`
-}
-
-func (m *Message) Reset() { *m = Message{} }
-func (m *Message) String() string { return proto.CompactTextString(m) }
-func (*Message) ProtoMessage() {}
-func (*Message) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
-
-func (m *Message) GetNested() *Nested {
- if m != nil {
- return m.Nested
- }
- return nil
-}
-
-func (m *Message) GetTerrain() map[string]*Nested {
- if m != nil {
- return m.Terrain
- }
- return nil
-}
-
-func (m *Message) GetProto2Field() *testdata.SubDefaults {
- if m != nil {
- return m.Proto2Field
- }
- return nil
-}
-
-func (m *Message) GetProto2Value() map[string]*testdata.SubDefaults {
- if m != nil {
- return m.Proto2Value
- }
- return nil
-}
-
-func (m *Message) GetAnything() *google_protobuf.Any {
- if m != nil {
- return m.Anything
- }
- return nil
-}
-
-func (m *Message) GetManyThings() []*google_protobuf.Any {
- if m != nil {
- return m.ManyThings
- }
- return nil
-}
-
-func (m *Message) GetSubmessage() *Message {
- if m != nil {
- return m.Submessage
- }
- return nil
-}
-
-func (m *Message) GetChildren() []*Message {
- if m != nil {
- return m.Children
- }
- return nil
-}
-
-type Nested struct {
- Bunny string `protobuf:"bytes,1,opt,name=bunny" json:"bunny,omitempty"`
- Cute bool `protobuf:"varint,2,opt,name=cute" json:"cute,omitempty"`
-}
-
-func (m *Nested) Reset() { *m = Nested{} }
-func (m *Nested) String() string { return proto.CompactTextString(m) }
-func (*Nested) ProtoMessage() {}
-func (*Nested) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
-
-type MessageWithMap struct {
- ByteMapping map[bool][]byte `protobuf:"bytes,1,rep,name=byte_mapping,json=byteMapping" json:"byte_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value,proto3"`
-}
-
-func (m *MessageWithMap) Reset() { *m = MessageWithMap{} }
-func (m *MessageWithMap) String() string { return proto.CompactTextString(m) }
-func (*MessageWithMap) ProtoMessage() {}
-func (*MessageWithMap) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
-
-func (m *MessageWithMap) GetByteMapping() map[bool][]byte {
- if m != nil {
- return m.ByteMapping
- }
- return nil
-}
-
-func init() {
- proto.RegisterType((*Message)(nil), "proto3_proto.Message")
- proto.RegisterType((*Nested)(nil), "proto3_proto.Nested")
- proto.RegisterType((*MessageWithMap)(nil), "proto3_proto.MessageWithMap")
- proto.RegisterEnum("proto3_proto.Message_Humour", Message_Humour_name, Message_Humour_value)
-}
-
-var fileDescriptor0 = []byte{
- // 669 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x84, 0x53, 0x6d, 0x6f, 0xd3, 0x3a,
- 0x18, 0xbd, 0x7d, 0x4f, 0x9f, 0xa4, 0x5b, 0xae, 0xb7, 0x2b, 0x79, 0xbd, 0x7c, 0x18, 0x45, 0x42,
- 0x13, 0x2f, 0x19, 0x14, 0x21, 0x4d, 0x08, 0x81, 0xb6, 0xb1, 0x89, 0x6a, 0x5d, 0xa9, 0xdc, 0x8d,
- 0x89, 0x4f, 0x51, 0xd2, 0xba, 0x6d, 0x44, 0xe3, 0x54, 0x89, 0x83, 0x94, 0xbf, 0xc3, 0x1f, 0x05,
- 0xbf, 0xa4, 0x5d, 0x36, 0x75, 0xf0, 0x29, 0xf6, 0x79, 0xce, 0xf1, 0xf3, 0xe4, 0xf8, 0x18, 0xf6,
- 0x96, 0x71, 0xc4, 0xa3, 0x37, 0xae, 0xfa, 0x1c, 0xea, 0x8d, 0xa3, 0x3e, 0xc8, 0x2a, 0x96, 0xda,
- 0x7b, 0xb3, 0x28, 0x9a, 0x2d, 0xa8, 0xa6, 0xf8, 0xe9, 0xf4, 0xd0, 0x63, 0x99, 0x26, 0xb6, 0x77,
- 0x38, 0x4d, 0xf8, 0xc4, 0xe3, 0xde, 0xa1, 0x5c, 0x68, 0xb0, 0xf3, 0xab, 0x01, 0x8d, 0x4b, 0x9a,
- 0x24, 0xde, 0x8c, 0x22, 0x04, 0x55, 0xe6, 0x85, 0x14, 0x97, 0xf6, 0x4b, 0x07, 0x4d, 0xa2, 0xd6,
- 0xe8, 0x08, 0x8c, 0x79, 0xb0, 0xf0, 0xe2, 0x80, 0x67, 0xb8, 0x2c, 0xf0, 0xad, 0xee, 0x23, 0xa7,
- 0xd8, 0xd0, 0xc9, 0xc5, 0xce, 0xe7, 0x34, 0x8c, 0xd2, 0x98, 0xac, 0xd9, 0x68, 0x1f, 0xac, 0x39,
- 0x0d, 0x66, 0x73, 0xee, 0x06, 0xcc, 0x1d, 0x87, 0xb8, 0x22, 0xd4, 0x2d, 0x02, 0x1a, 0xeb, 0xb1,
- 0xd3, 0x50, 0xf6, 0x93, 0xe3, 0xe0, 0xaa, 0xa8, 0x58, 0x44, 0xad, 0xd1, 0x63, 0xb0, 0x62, 0x9a,
- 0xa4, 0x0b, 0xee, 0x8e, 0xa3, 0x94, 0x71, 0xdc, 0x10, 0xb5, 0x0a, 0x31, 0x35, 0x76, 0x2a, 0x21,
- 0xf4, 0x04, 0x5a, 0x3c, 0x4e, 0xa9, 0x9b, 0x8c, 0x23, 0x9e, 0x84, 0x1e, 0xc3, 0x86, 0xe0, 0x18,
- 0xc4, 0x92, 0xe0, 0x28, 0xc7, 0xd0, 0x2e, 0xd4, 0x44, 0x3d, 0xa6, 0xb8, 0x29, 0x8a, 0x65, 0xa2,
- 0x37, 0xc8, 0x86, 0xca, 0x77, 0x9a, 0xe1, 0xda, 0x7e, 0xe5, 0xa0, 0x4a, 0xe4, 0x12, 0xfd, 0x0f,
- 0xcd, 0x64, 0x1e, 0xc5, 0xdc, 0x95, 0xf8, 0x8e, 0xc0, 0x6b, 0xc4, 0x50, 0xc0, 0x85, 0x28, 0xbe,
- 0x80, 0x3a, 0x13, 0x56, 0xd1, 0x09, 0xae, 0x8b, 0x53, 0xcc, 0xee, 0xee, 0xdd, 0x5f, 0x1f, 0xa8,
- 0x1a, 0xc9, 0x39, 0xe8, 0x2d, 0x34, 0x62, 0x77, 0x9a, 0x32, 0x96, 0x61, 0x5b, 0x1c, 0xf4, 0x37,
- 0xa7, 0xea, 0xf1, 0xb9, 0xe4, 0xa2, 0xf7, 0xd0, 0xe0, 0x34, 0x8e, 0xbd, 0x80, 0x61, 0x10, 0x32,
- 0xb3, 0xdb, 0xd9, 0x2c, 0xbb, 0xd2, 0xa4, 0x33, 0xc6, 0xe3, 0x8c, 0xac, 0x24, 0xe2, 0x7e, 0xf4,
- 0xfd, 0x77, 0xdd, 0x69, 0x40, 0x17, 0x13, 0x6c, 0xaa, 0x41, 0xff, 0x73, 0x56, 0x77, 0xed, 0x8c,
- 0x52, 0xff, 0x13, 0x9d, 0x7a, 0xc2, 0xbd, 0x84, 0x98, 0x9a, 0x7a, 0x2e, 0x99, 0xa8, 0xb7, 0x56,
- 0xfe, 0xf0, 0x16, 0x29, 0xc5, 0x2d, 0xd5, 0xfc, 0xe9, 0xe6, 0xe6, 0x43, 0xc5, 0xfc, 0x2a, 0x89,
- 0x7a, 0x80, 0xfc, 0x28, 0x85, 0xa0, 0x57, 0x60, 0x88, 0x98, 0xf1, 0x79, 0xc0, 0x66, 0x78, 0x2b,
- 0x77, 0x4a, 0xe7, 0xd0, 0x59, 0xe5, 0xd0, 0x39, 0x66, 0x19, 0x59, 0xb3, 0x84, 0x57, 0xa6, 0xb8,
- 0xa5, 0xcc, 0x55, 0xbb, 0x04, 0x6f, 0xab, 0xde, 0x9b, 0x45, 0x20, 0x89, 0x57, 0x8a, 0x27, 0x64,
- 0x90, 0xa4, 0x7e, 0xa8, 0x87, 0xc2, 0xff, 0xe6, 0xff, 0xba, 0x69, 0x62, 0x52, 0x20, 0xa2, 0xd7,
- 0x60, 0x8c, 0x45, 0x2e, 0x27, 0x31, 0x65, 0x18, 0xa9, 0x56, 0x0f, 0x88, 0xd6, 0xb4, 0xf6, 0x10,
- 0xac, 0xa2, 0xe1, 0xab, 0xe4, 0xe8, 0xa7, 0xa1, 0x92, 0xf3, 0x0c, 0x6a, 0xda, 0xb8, 0xf2, 0x1f,
- 0xb2, 0xa1, 0x29, 0xef, 0xca, 0x47, 0xa5, 0xf6, 0x35, 0xd8, 0xf7, 0x5d, 0xdc, 0x70, 0xea, 0xf3,
- 0xbb, 0xa7, 0x3e, 0x70, 0x91, 0xb7, 0xc7, 0x76, 0x3e, 0x42, 0x5d, 0x07, 0x0a, 0x99, 0xd0, 0xb8,
- 0x1e, 0x5c, 0x0c, 0xbe, 0xdc, 0x0c, 0xec, 0x7f, 0x90, 0x01, 0xd5, 0xe1, 0xf5, 0x60, 0x64, 0x97,
- 0x50, 0x0b, 0x9a, 0xa3, 0xfe, 0xf1, 0x70, 0x74, 0xd5, 0x3b, 0xbd, 0xb0, 0xcb, 0x68, 0x1b, 0xcc,
- 0x93, 0x5e, 0xbf, 0xef, 0x9e, 0x1c, 0xf7, 0xfa, 0x67, 0xdf, 0xec, 0x4a, 0xa7, 0x0b, 0x75, 0x3d,
- 0xac, 0x7c, 0x33, 0xbe, 0x8a, 0xaf, 0x9e, 0x47, 0x6f, 0xe4, 0x2b, 0x1d, 0xa7, 0x5c, 0x0f, 0x64,
- 0x10, 0xb5, 0xee, 0xfc, 0x2c, 0xc1, 0x56, 0xee, 0xd9, 0x4d, 0xc0, 0xe7, 0x97, 0xde, 0x12, 0x09,
- 0xc3, 0xfc, 0x8c, 0x53, 0x37, 0xf4, 0x96, 0x4b, 0x99, 0x83, 0x92, 0xf2, 0xf9, 0xe5, 0x46, 0x9f,
- 0x73, 0x8d, 0x73, 0x22, 0x04, 0x97, 0x9a, 0x9f, 0xa7, 0xca, 0xbf, 0x45, 0xda, 0x1f, 0xc0, 0xbe,
- 0x4f, 0x28, 0x1a, 0x66, 0x68, 0xc3, 0x76, 0x8b, 0x86, 0x59, 0x05, 0x67, 0xfc, 0xba, 0x6e, 0xfd,
- 0x3b, 0x00, 0x00, 0xff, 0xff, 0x8b, 0x40, 0x3c, 0xbe, 0x3c, 0x05, 0x00, 0x00,
-}
diff --git a/vendor/src/github.com/golang/protobuf/proto/proto3_proto/proto3.proto b/vendor/src/github.com/golang/protobuf/proto/proto3_proto/proto3.proto
deleted file mode 100644
index 75d5a02..0000000
--- a/vendor/src/github.com/golang/protobuf/proto/proto3_proto/proto3.proto
+++ /dev/null
@@ -1,78 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2014 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-syntax = "proto3";
-
-import "google/protobuf/any.proto";
-import "testdata/test.proto";
-
-package proto3_proto;
-
-message Message {
- enum Humour {
- UNKNOWN = 0;
- PUNS = 1;
- SLAPSTICK = 2;
- BILL_BAILEY = 3;
- }
-
- string name = 1;
- Humour hilarity = 2;
- uint32 height_in_cm = 3;
- bytes data = 4;
- int64 result_count = 7;
- bool true_scotsman = 8;
- float score = 9;
-
- repeated uint64 key = 5;
- repeated int32 short_key = 19;
- Nested nested = 6;
- repeated Humour r_funny = 16;
-
- map terrain = 10;
- testdata.SubDefaults proto2_field = 11;
- map proto2_value = 13;
-
- google.protobuf.Any anything = 14;
- repeated google.protobuf.Any many_things = 15;
-
- Message submessage = 17;
- repeated Message children = 18;
-}
-
-message Nested {
- string bunny = 1;
- bool cute = 2;
-}
-
-message MessageWithMap {
- map byte_mapping = 1;
-}
diff --git a/vendor/src/github.com/golang/protobuf/proto/proto3_test.go b/vendor/src/github.com/golang/protobuf/proto/proto3_test.go
deleted file mode 100644
index 462f805..0000000
--- a/vendor/src/github.com/golang/protobuf/proto/proto3_test.go
+++ /dev/null
@@ -1,125 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2014 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto_test
-
-import (
- "testing"
-
- "github.com/golang/protobuf/proto"
- pb "github.com/golang/protobuf/proto/proto3_proto"
- tpb "github.com/golang/protobuf/proto/testdata"
-)
-
-func TestProto3ZeroValues(t *testing.T) {
- tests := []struct {
- desc string
- m proto.Message
- }{
- {"zero message", &pb.Message{}},
- {"empty bytes field", &pb.Message{Data: []byte{}}},
- }
- for _, test := range tests {
- b, err := proto.Marshal(test.m)
- if err != nil {
- t.Errorf("%s: proto.Marshal: %v", test.desc, err)
- continue
- }
- if len(b) > 0 {
- t.Errorf("%s: Encoding is non-empty: %q", test.desc, b)
- }
- }
-}
-
-func TestRoundTripProto3(t *testing.T) {
- m := &pb.Message{
- Name: "David", // (2 | 1<<3): 0x0a 0x05 "David"
- Hilarity: pb.Message_PUNS, // (0 | 2<<3): 0x10 0x01
- HeightInCm: 178, // (0 | 3<<3): 0x18 0xb2 0x01
- Data: []byte("roboto"), // (2 | 4<<3): 0x20 0x06 "roboto"
- ResultCount: 47, // (0 | 7<<3): 0x38 0x2f
- TrueScotsman: true, // (0 | 8<<3): 0x40 0x01
- Score: 8.1, // (5 | 9<<3): 0x4d <8.1>
-
- Key: []uint64{1, 0xdeadbeef},
- Nested: &pb.Nested{
- Bunny: "Monty",
- },
- }
- t.Logf(" m: %v", m)
-
- b, err := proto.Marshal(m)
- if err != nil {
- t.Fatalf("proto.Marshal: %v", err)
- }
- t.Logf(" b: %q", b)
-
- m2 := new(pb.Message)
- if err := proto.Unmarshal(b, m2); err != nil {
- t.Fatalf("proto.Unmarshal: %v", err)
- }
- t.Logf("m2: %v", m2)
-
- if !proto.Equal(m, m2) {
- t.Errorf("proto.Equal returned false:\n m: %v\nm2: %v", m, m2)
- }
-}
-
-func TestProto3SetDefaults(t *testing.T) {
- in := &pb.Message{
- Terrain: map[string]*pb.Nested{
- "meadow": new(pb.Nested),
- },
- Proto2Field: new(tpb.SubDefaults),
- Proto2Value: map[string]*tpb.SubDefaults{
- "badlands": new(tpb.SubDefaults),
- },
- }
-
- got := proto.Clone(in).(*pb.Message)
- proto.SetDefaults(got)
-
- // There are no defaults in proto3. Everything should be the zero value, but
- // we need to remember to set defaults for nested proto2 messages.
- want := &pb.Message{
- Terrain: map[string]*pb.Nested{
- "meadow": new(pb.Nested),
- },
- Proto2Field: &tpb.SubDefaults{N: proto.Int64(7)},
- Proto2Value: map[string]*tpb.SubDefaults{
- "badlands": &tpb.SubDefaults{N: proto.Int64(7)},
- },
- }
-
- if !proto.Equal(got, want) {
- t.Errorf("with in = %v\nproto.SetDefaults(in) =>\ngot %v\nwant %v", in, got, want)
- }
-}
diff --git a/vendor/src/github.com/golang/protobuf/proto/size2_test.go b/vendor/src/github.com/golang/protobuf/proto/size2_test.go
deleted file mode 100644
index a2729c3..0000000
--- a/vendor/src/github.com/golang/protobuf/proto/size2_test.go
+++ /dev/null
@@ -1,63 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2012 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-import (
- "testing"
-)
-
-// This is a separate file and package from size_test.go because that one uses
-// generated messages and thus may not be in package proto without having a circular
-// dependency, whereas this file tests unexported details of size.go.
-
-func TestVarintSize(t *testing.T) {
- // Check the edge cases carefully.
- testCases := []struct {
- n uint64
- size int
- }{
- {0, 1},
- {1, 1},
- {127, 1},
- {128, 2},
- {16383, 2},
- {16384, 3},
- {1<<63 - 1, 9},
- {1 << 63, 10},
- }
- for _, tc := range testCases {
- size := sizeVarint(tc.n)
- if size != tc.size {
- t.Errorf("sizeVarint(%d) = %d, want %d", tc.n, size, tc.size)
- }
- }
-}
diff --git a/vendor/src/github.com/golang/protobuf/proto/size_test.go b/vendor/src/github.com/golang/protobuf/proto/size_test.go
deleted file mode 100644
index af1034d..0000000
--- a/vendor/src/github.com/golang/protobuf/proto/size_test.go
+++ /dev/null
@@ -1,164 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2012 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto_test
-
-import (
- "log"
- "strings"
- "testing"
-
- . "github.com/golang/protobuf/proto"
- proto3pb "github.com/golang/protobuf/proto/proto3_proto"
- pb "github.com/golang/protobuf/proto/testdata"
-)
-
-var messageWithExtension1 = &pb.MyMessage{Count: Int32(7)}
-
-// messageWithExtension2 is in equal_test.go.
-var messageWithExtension3 = &pb.MyMessage{Count: Int32(8)}
-
-func init() {
- if err := SetExtension(messageWithExtension1, pb.E_Ext_More, &pb.Ext{Data: String("Abbott")}); err != nil {
- log.Panicf("SetExtension: %v", err)
- }
- if err := SetExtension(messageWithExtension3, pb.E_Ext_More, &pb.Ext{Data: String("Costello")}); err != nil {
- log.Panicf("SetExtension: %v", err)
- }
-
- // Force messageWithExtension3 to have the extension encoded.
- Marshal(messageWithExtension3)
-
-}
-
-var SizeTests = []struct {
- desc string
- pb Message
-}{
- {"empty", &pb.OtherMessage{}},
- // Basic types.
- {"bool", &pb.Defaults{F_Bool: Bool(true)}},
- {"int32", &pb.Defaults{F_Int32: Int32(12)}},
- {"negative int32", &pb.Defaults{F_Int32: Int32(-1)}},
- {"small int64", &pb.Defaults{F_Int64: Int64(1)}},
- {"big int64", &pb.Defaults{F_Int64: Int64(1 << 20)}},
- {"negative int64", &pb.Defaults{F_Int64: Int64(-1)}},
- {"fixed32", &pb.Defaults{F_Fixed32: Uint32(71)}},
- {"fixed64", &pb.Defaults{F_Fixed64: Uint64(72)}},
- {"uint32", &pb.Defaults{F_Uint32: Uint32(123)}},
- {"uint64", &pb.Defaults{F_Uint64: Uint64(124)}},
- {"float", &pb.Defaults{F_Float: Float32(12.6)}},
- {"double", &pb.Defaults{F_Double: Float64(13.9)}},
- {"string", &pb.Defaults{F_String: String("niles")}},
- {"bytes", &pb.Defaults{F_Bytes: []byte("wowsa")}},
- {"bytes, empty", &pb.Defaults{F_Bytes: []byte{}}},
- {"sint32", &pb.Defaults{F_Sint32: Int32(65)}},
- {"sint64", &pb.Defaults{F_Sint64: Int64(67)}},
- {"enum", &pb.Defaults{F_Enum: pb.Defaults_BLUE.Enum()}},
- // Repeated.
- {"empty repeated bool", &pb.MoreRepeated{Bools: []bool{}}},
- {"repeated bool", &pb.MoreRepeated{Bools: []bool{false, true, true, false}}},
- {"packed repeated bool", &pb.MoreRepeated{BoolsPacked: []bool{false, true, true, false, true, true, true}}},
- {"repeated int32", &pb.MoreRepeated{Ints: []int32{1, 12203, 1729, -1}}},
- {"repeated int32 packed", &pb.MoreRepeated{IntsPacked: []int32{1, 12203, 1729}}},
- {"repeated int64 packed", &pb.MoreRepeated{Int64SPacked: []int64{
- // Need enough large numbers to verify that the header is counting the number of bytes
- // for the field, not the number of elements.
- 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62,
- 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62,
- }}},
- {"repeated string", &pb.MoreRepeated{Strings: []string{"r", "ken", "gri"}}},
- {"repeated fixed", &pb.MoreRepeated{Fixeds: []uint32{1, 2, 3, 4}}},
- // Nested.
- {"nested", &pb.OldMessage{Nested: &pb.OldMessage_Nested{Name: String("whatever")}}},
- {"group", &pb.GroupOld{G: &pb.GroupOld_G{X: Int32(12345)}}},
- // Other things.
- {"unrecognized", &pb.MoreRepeated{XXX_unrecognized: []byte{13<<3 | 0, 4}}},
- {"extension (unencoded)", messageWithExtension1},
- {"extension (encoded)", messageWithExtension3},
- // proto3 message
- {"proto3 empty", &proto3pb.Message{}},
- {"proto3 bool", &proto3pb.Message{TrueScotsman: true}},
- {"proto3 int64", &proto3pb.Message{ResultCount: 1}},
- {"proto3 uint32", &proto3pb.Message{HeightInCm: 123}},
- {"proto3 float", &proto3pb.Message{Score: 12.6}},
- {"proto3 string", &proto3pb.Message{Name: "Snezana"}},
- {"proto3 bytes", &proto3pb.Message{Data: []byte("wowsa")}},
- {"proto3 bytes, empty", &proto3pb.Message{Data: []byte{}}},
- {"proto3 enum", &proto3pb.Message{Hilarity: proto3pb.Message_PUNS}},
- {"proto3 map field with empty bytes", &proto3pb.MessageWithMap{ByteMapping: map[bool][]byte{false: []byte{}}}},
-
- {"map field", &pb.MessageWithMap{NameMapping: map[int32]string{1: "Rob", 7: "Andrew"}}},
- {"map field with message", &pb.MessageWithMap{MsgMapping: map[int64]*pb.FloatingPoint{0x7001: &pb.FloatingPoint{F: Float64(2.0)}}}},
- {"map field with bytes", &pb.MessageWithMap{ByteMapping: map[bool][]byte{true: []byte("this time for sure")}}},
- {"map field with empty bytes", &pb.MessageWithMap{ByteMapping: map[bool][]byte{true: []byte{}}}},
-
- {"map field with big entry", &pb.MessageWithMap{NameMapping: map[int32]string{8: strings.Repeat("x", 125)}}},
- {"map field with big key and val", &pb.MessageWithMap{StrToStr: map[string]string{strings.Repeat("x", 70): strings.Repeat("y", 70)}}},
- {"map field with big numeric key", &pb.MessageWithMap{NameMapping: map[int32]string{0xf00d: "om nom nom"}}},
-
- {"oneof not set", &pb.Oneof{}},
- {"oneof bool", &pb.Oneof{Union: &pb.Oneof_F_Bool{true}}},
- {"oneof zero int32", &pb.Oneof{Union: &pb.Oneof_F_Int32{0}}},
- {"oneof big int32", &pb.Oneof{Union: &pb.Oneof_F_Int32{1 << 20}}},
- {"oneof int64", &pb.Oneof{Union: &pb.Oneof_F_Int64{42}}},
- {"oneof fixed32", &pb.Oneof{Union: &pb.Oneof_F_Fixed32{43}}},
- {"oneof fixed64", &pb.Oneof{Union: &pb.Oneof_F_Fixed64{44}}},
- {"oneof uint32", &pb.Oneof{Union: &pb.Oneof_F_Uint32{45}}},
- {"oneof uint64", &pb.Oneof{Union: &pb.Oneof_F_Uint64{46}}},
- {"oneof float", &pb.Oneof{Union: &pb.Oneof_F_Float{47.1}}},
- {"oneof double", &pb.Oneof{Union: &pb.Oneof_F_Double{48.9}}},
- {"oneof string", &pb.Oneof{Union: &pb.Oneof_F_String{"Rhythmic Fman"}}},
- {"oneof bytes", &pb.Oneof{Union: &pb.Oneof_F_Bytes{[]byte("let go")}}},
- {"oneof sint32", &pb.Oneof{Union: &pb.Oneof_F_Sint32{50}}},
- {"oneof sint64", &pb.Oneof{Union: &pb.Oneof_F_Sint64{51}}},
- {"oneof enum", &pb.Oneof{Union: &pb.Oneof_F_Enum{pb.MyMessage_BLUE}}},
- {"message for oneof", &pb.GoTestField{Label: String("k"), Type: String("v")}},
- {"oneof message", &pb.Oneof{Union: &pb.Oneof_F_Message{&pb.GoTestField{Label: String("k"), Type: String("v")}}}},
- {"oneof group", &pb.Oneof{Union: &pb.Oneof_FGroup{&pb.Oneof_F_Group{X: Int32(52)}}}},
- {"oneof largest tag", &pb.Oneof{Union: &pb.Oneof_F_Largest_Tag{1}}},
- {"multiple oneofs", &pb.Oneof{Union: &pb.Oneof_F_Int32{1}, Tormato: &pb.Oneof_Value{2}}},
-}
-
-func TestSize(t *testing.T) {
- for _, tc := range SizeTests {
- size := Size(tc.pb)
- b, err := Marshal(tc.pb)
- if err != nil {
- t.Errorf("%v: Marshal failed: %v", tc.desc, err)
- continue
- }
- if size != len(b) {
- t.Errorf("%v: Size(%v) = %d, want %d", tc.desc, tc.pb, size, len(b))
- t.Logf("%v: bytes: %#v", tc.desc, b)
- }
- }
-}
diff --git a/vendor/src/github.com/golang/protobuf/proto/testdata/Makefile b/vendor/src/github.com/golang/protobuf/proto/testdata/Makefile
deleted file mode 100644
index fc28862..0000000
--- a/vendor/src/github.com/golang/protobuf/proto/testdata/Makefile
+++ /dev/null
@@ -1,50 +0,0 @@
-# Go support for Protocol Buffers - Google's data interchange format
-#
-# Copyright 2010 The Go Authors. All rights reserved.
-# https://github.com/golang/protobuf
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-include ../../Make.protobuf
-
-all: regenerate
-
-regenerate:
- rm -f test.pb.go
- make test.pb.go
-
-# The following rules are just aids to development. Not needed for typical testing.
-
-diff: regenerate
- git diff test.pb.go
-
-restore:
- cp test.pb.go.golden test.pb.go
-
-preserve:
- cp test.pb.go test.pb.go.golden
diff --git a/vendor/src/github.com/golang/protobuf/proto/testdata/golden_test.go b/vendor/src/github.com/golang/protobuf/proto/testdata/golden_test.go
deleted file mode 100644
index 7172d0e..0000000
--- a/vendor/src/github.com/golang/protobuf/proto/testdata/golden_test.go
+++ /dev/null
@@ -1,86 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2012 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Verify that the compiler output for test.proto is unchanged.
-
-package testdata
-
-import (
- "crypto/sha1"
- "fmt"
- "io/ioutil"
- "os"
- "os/exec"
- "path/filepath"
- "testing"
-)
-
-// sum returns in string form (for easy comparison) the SHA-1 hash of the named file.
-func sum(t *testing.T, name string) string {
- data, err := ioutil.ReadFile(name)
- if err != nil {
- t.Fatal(err)
- }
- t.Logf("sum(%q): length is %d", name, len(data))
- hash := sha1.New()
- _, err = hash.Write(data)
- if err != nil {
- t.Fatal(err)
- }
- return fmt.Sprintf("% x", hash.Sum(nil))
-}
-
-func run(t *testing.T, name string, args ...string) {
- cmd := exec.Command(name, args...)
- cmd.Stdin = os.Stdin
- cmd.Stdout = os.Stdout
- cmd.Stderr = os.Stderr
- err := cmd.Run()
- if err != nil {
- t.Fatal(err)
- }
-}
-
-func TestGolden(t *testing.T) {
- // Compute the original checksum.
- goldenSum := sum(t, "test.pb.go")
- // Run the proto compiler.
- run(t, "protoc", "--go_out="+os.TempDir(), "test.proto")
- newFile := filepath.Join(os.TempDir(), "test.pb.go")
- defer os.Remove(newFile)
- // Compute the new checksum.
- newSum := sum(t, newFile)
- // Verify
- if newSum != goldenSum {
- run(t, "diff", "-u", "test.pb.go", newFile)
- t.Fatal("Code generated by protoc-gen-go has changed; update test.pb.go")
- }
-}
diff --git a/vendor/src/github.com/golang/protobuf/proto/testdata/test.pb.go b/vendor/src/github.com/golang/protobuf/proto/testdata/test.pb.go
deleted file mode 100644
index 85b3bdf..0000000
--- a/vendor/src/github.com/golang/protobuf/proto/testdata/test.pb.go
+++ /dev/null
@@ -1,4061 +0,0 @@
-// Code generated by protoc-gen-go.
-// source: test.proto
-// DO NOT EDIT!
-
-/*
-Package testdata is a generated protocol buffer package.
-
-It is generated from these files:
- test.proto
-
-It has these top-level messages:
- GoEnum
- GoTestField
- GoTest
- GoTestRequiredGroupField
- GoSkipTest
- NonPackedTest
- PackedTest
- MaxTag
- OldMessage
- NewMessage
- InnerMessage
- OtherMessage
- RequiredInnerMessage
- MyMessage
- Ext
- ComplexExtension
- DefaultsMessage
- MyMessageSet
- Empty
- MessageList
- Strings
- Defaults
- SubDefaults
- RepeatedEnum
- MoreRepeated
- GroupOld
- GroupNew
- FloatingPoint
- MessageWithMap
- Oneof
- Communique
-*/
-package testdata
-
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-type FOO int32
-
-const (
- FOO_FOO1 FOO = 1
-)
-
-var FOO_name = map[int32]string{
- 1: "FOO1",
-}
-var FOO_value = map[string]int32{
- "FOO1": 1,
-}
-
-func (x FOO) Enum() *FOO {
- p := new(FOO)
- *p = x
- return p
-}
-func (x FOO) String() string {
- return proto.EnumName(FOO_name, int32(x))
-}
-func (x *FOO) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(FOO_value, data, "FOO")
- if err != nil {
- return err
- }
- *x = FOO(value)
- return nil
-}
-func (FOO) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
-
-// An enum, for completeness.
-type GoTest_KIND int32
-
-const (
- GoTest_VOID GoTest_KIND = 0
- // Basic types
- GoTest_BOOL GoTest_KIND = 1
- GoTest_BYTES GoTest_KIND = 2
- GoTest_FINGERPRINT GoTest_KIND = 3
- GoTest_FLOAT GoTest_KIND = 4
- GoTest_INT GoTest_KIND = 5
- GoTest_STRING GoTest_KIND = 6
- GoTest_TIME GoTest_KIND = 7
- // Groupings
- GoTest_TUPLE GoTest_KIND = 8
- GoTest_ARRAY GoTest_KIND = 9
- GoTest_MAP GoTest_KIND = 10
- // Table types
- GoTest_TABLE GoTest_KIND = 11
- // Functions
- GoTest_FUNCTION GoTest_KIND = 12
-)
-
-var GoTest_KIND_name = map[int32]string{
- 0: "VOID",
- 1: "BOOL",
- 2: "BYTES",
- 3: "FINGERPRINT",
- 4: "FLOAT",
- 5: "INT",
- 6: "STRING",
- 7: "TIME",
- 8: "TUPLE",
- 9: "ARRAY",
- 10: "MAP",
- 11: "TABLE",
- 12: "FUNCTION",
-}
-var GoTest_KIND_value = map[string]int32{
- "VOID": 0,
- "BOOL": 1,
- "BYTES": 2,
- "FINGERPRINT": 3,
- "FLOAT": 4,
- "INT": 5,
- "STRING": 6,
- "TIME": 7,
- "TUPLE": 8,
- "ARRAY": 9,
- "MAP": 10,
- "TABLE": 11,
- "FUNCTION": 12,
-}
-
-func (x GoTest_KIND) Enum() *GoTest_KIND {
- p := new(GoTest_KIND)
- *p = x
- return p
-}
-func (x GoTest_KIND) String() string {
- return proto.EnumName(GoTest_KIND_name, int32(x))
-}
-func (x *GoTest_KIND) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(GoTest_KIND_value, data, "GoTest_KIND")
- if err != nil {
- return err
- }
- *x = GoTest_KIND(value)
- return nil
-}
-func (GoTest_KIND) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} }
-
-type MyMessage_Color int32
-
-const (
- MyMessage_RED MyMessage_Color = 0
- MyMessage_GREEN MyMessage_Color = 1
- MyMessage_BLUE MyMessage_Color = 2
-)
-
-var MyMessage_Color_name = map[int32]string{
- 0: "RED",
- 1: "GREEN",
- 2: "BLUE",
-}
-var MyMessage_Color_value = map[string]int32{
- "RED": 0,
- "GREEN": 1,
- "BLUE": 2,
-}
-
-func (x MyMessage_Color) Enum() *MyMessage_Color {
- p := new(MyMessage_Color)
- *p = x
- return p
-}
-func (x MyMessage_Color) String() string {
- return proto.EnumName(MyMessage_Color_name, int32(x))
-}
-func (x *MyMessage_Color) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(MyMessage_Color_value, data, "MyMessage_Color")
- if err != nil {
- return err
- }
- *x = MyMessage_Color(value)
- return nil
-}
-func (MyMessage_Color) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{13, 0} }
-
-type DefaultsMessage_DefaultsEnum int32
-
-const (
- DefaultsMessage_ZERO DefaultsMessage_DefaultsEnum = 0
- DefaultsMessage_ONE DefaultsMessage_DefaultsEnum = 1
- DefaultsMessage_TWO DefaultsMessage_DefaultsEnum = 2
-)
-
-var DefaultsMessage_DefaultsEnum_name = map[int32]string{
- 0: "ZERO",
- 1: "ONE",
- 2: "TWO",
-}
-var DefaultsMessage_DefaultsEnum_value = map[string]int32{
- "ZERO": 0,
- "ONE": 1,
- "TWO": 2,
-}
-
-func (x DefaultsMessage_DefaultsEnum) Enum() *DefaultsMessage_DefaultsEnum {
- p := new(DefaultsMessage_DefaultsEnum)
- *p = x
- return p
-}
-func (x DefaultsMessage_DefaultsEnum) String() string {
- return proto.EnumName(DefaultsMessage_DefaultsEnum_name, int32(x))
-}
-func (x *DefaultsMessage_DefaultsEnum) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(DefaultsMessage_DefaultsEnum_value, data, "DefaultsMessage_DefaultsEnum")
- if err != nil {
- return err
- }
- *x = DefaultsMessage_DefaultsEnum(value)
- return nil
-}
-func (DefaultsMessage_DefaultsEnum) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor0, []int{16, 0}
-}
-
-type Defaults_Color int32
-
-const (
- Defaults_RED Defaults_Color = 0
- Defaults_GREEN Defaults_Color = 1
- Defaults_BLUE Defaults_Color = 2
-)
-
-var Defaults_Color_name = map[int32]string{
- 0: "RED",
- 1: "GREEN",
- 2: "BLUE",
-}
-var Defaults_Color_value = map[string]int32{
- "RED": 0,
- "GREEN": 1,
- "BLUE": 2,
-}
-
-func (x Defaults_Color) Enum() *Defaults_Color {
- p := new(Defaults_Color)
- *p = x
- return p
-}
-func (x Defaults_Color) String() string {
- return proto.EnumName(Defaults_Color_name, int32(x))
-}
-func (x *Defaults_Color) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(Defaults_Color_value, data, "Defaults_Color")
- if err != nil {
- return err
- }
- *x = Defaults_Color(value)
- return nil
-}
-func (Defaults_Color) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{21, 0} }
-
-type RepeatedEnum_Color int32
-
-const (
- RepeatedEnum_RED RepeatedEnum_Color = 1
-)
-
-var RepeatedEnum_Color_name = map[int32]string{
- 1: "RED",
-}
-var RepeatedEnum_Color_value = map[string]int32{
- "RED": 1,
-}
-
-func (x RepeatedEnum_Color) Enum() *RepeatedEnum_Color {
- p := new(RepeatedEnum_Color)
- *p = x
- return p
-}
-func (x RepeatedEnum_Color) String() string {
- return proto.EnumName(RepeatedEnum_Color_name, int32(x))
-}
-func (x *RepeatedEnum_Color) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(RepeatedEnum_Color_value, data, "RepeatedEnum_Color")
- if err != nil {
- return err
- }
- *x = RepeatedEnum_Color(value)
- return nil
-}
-func (RepeatedEnum_Color) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{23, 0} }
-
-type GoEnum struct {
- Foo *FOO `protobuf:"varint,1,req,name=foo,enum=testdata.FOO" json:"foo,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *GoEnum) Reset() { *m = GoEnum{} }
-func (m *GoEnum) String() string { return proto.CompactTextString(m) }
-func (*GoEnum) ProtoMessage() {}
-func (*GoEnum) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
-
-func (m *GoEnum) GetFoo() FOO {
- if m != nil && m.Foo != nil {
- return *m.Foo
- }
- return FOO_FOO1
-}
-
-type GoTestField struct {
- Label *string `protobuf:"bytes,1,req,name=Label,json=label" json:"Label,omitempty"`
- Type *string `protobuf:"bytes,2,req,name=Type,json=type" json:"Type,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *GoTestField) Reset() { *m = GoTestField{} }
-func (m *GoTestField) String() string { return proto.CompactTextString(m) }
-func (*GoTestField) ProtoMessage() {}
-func (*GoTestField) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
-
-func (m *GoTestField) GetLabel() string {
- if m != nil && m.Label != nil {
- return *m.Label
- }
- return ""
-}
-
-func (m *GoTestField) GetType() string {
- if m != nil && m.Type != nil {
- return *m.Type
- }
- return ""
-}
-
-type GoTest struct {
- // Some typical parameters
- Kind *GoTest_KIND `protobuf:"varint,1,req,name=Kind,json=kind,enum=testdata.GoTest_KIND" json:"Kind,omitempty"`
- Table *string `protobuf:"bytes,2,opt,name=Table,json=table" json:"Table,omitempty"`
- Param *int32 `protobuf:"varint,3,opt,name=Param,json=param" json:"Param,omitempty"`
- // Required, repeated and optional foreign fields.
- RequiredField *GoTestField `protobuf:"bytes,4,req,name=RequiredField,json=requiredField" json:"RequiredField,omitempty"`
- RepeatedField []*GoTestField `protobuf:"bytes,5,rep,name=RepeatedField,json=repeatedField" json:"RepeatedField,omitempty"`
- OptionalField *GoTestField `protobuf:"bytes,6,opt,name=OptionalField,json=optionalField" json:"OptionalField,omitempty"`
- // Required fields of all basic types
- F_BoolRequired *bool `protobuf:"varint,10,req,name=F_Bool_required,json=fBoolRequired" json:"F_Bool_required,omitempty"`
- F_Int32Required *int32 `protobuf:"varint,11,req,name=F_Int32_required,json=fInt32Required" json:"F_Int32_required,omitempty"`
- F_Int64Required *int64 `protobuf:"varint,12,req,name=F_Int64_required,json=fInt64Required" json:"F_Int64_required,omitempty"`
- F_Fixed32Required *uint32 `protobuf:"fixed32,13,req,name=F_Fixed32_required,json=fFixed32Required" json:"F_Fixed32_required,omitempty"`
- F_Fixed64Required *uint64 `protobuf:"fixed64,14,req,name=F_Fixed64_required,json=fFixed64Required" json:"F_Fixed64_required,omitempty"`
- F_Uint32Required *uint32 `protobuf:"varint,15,req,name=F_Uint32_required,json=fUint32Required" json:"F_Uint32_required,omitempty"`
- F_Uint64Required *uint64 `protobuf:"varint,16,req,name=F_Uint64_required,json=fUint64Required" json:"F_Uint64_required,omitempty"`
- F_FloatRequired *float32 `protobuf:"fixed32,17,req,name=F_Float_required,json=fFloatRequired" json:"F_Float_required,omitempty"`
- F_DoubleRequired *float64 `protobuf:"fixed64,18,req,name=F_Double_required,json=fDoubleRequired" json:"F_Double_required,omitempty"`
- F_StringRequired *string `protobuf:"bytes,19,req,name=F_String_required,json=fStringRequired" json:"F_String_required,omitempty"`
- F_BytesRequired []byte `protobuf:"bytes,101,req,name=F_Bytes_required,json=fBytesRequired" json:"F_Bytes_required,omitempty"`
- F_Sint32Required *int32 `protobuf:"zigzag32,102,req,name=F_Sint32_required,json=fSint32Required" json:"F_Sint32_required,omitempty"`
- F_Sint64Required *int64 `protobuf:"zigzag64,103,req,name=F_Sint64_required,json=fSint64Required" json:"F_Sint64_required,omitempty"`
- // Repeated fields of all basic types
- F_BoolRepeated []bool `protobuf:"varint,20,rep,name=F_Bool_repeated,json=fBoolRepeated" json:"F_Bool_repeated,omitempty"`
- F_Int32Repeated []int32 `protobuf:"varint,21,rep,name=F_Int32_repeated,json=fInt32Repeated" json:"F_Int32_repeated,omitempty"`
- F_Int64Repeated []int64 `protobuf:"varint,22,rep,name=F_Int64_repeated,json=fInt64Repeated" json:"F_Int64_repeated,omitempty"`
- F_Fixed32Repeated []uint32 `protobuf:"fixed32,23,rep,name=F_Fixed32_repeated,json=fFixed32Repeated" json:"F_Fixed32_repeated,omitempty"`
- F_Fixed64Repeated []uint64 `protobuf:"fixed64,24,rep,name=F_Fixed64_repeated,json=fFixed64Repeated" json:"F_Fixed64_repeated,omitempty"`
- F_Uint32Repeated []uint32 `protobuf:"varint,25,rep,name=F_Uint32_repeated,json=fUint32Repeated" json:"F_Uint32_repeated,omitempty"`
- F_Uint64Repeated []uint64 `protobuf:"varint,26,rep,name=F_Uint64_repeated,json=fUint64Repeated" json:"F_Uint64_repeated,omitempty"`
- F_FloatRepeated []float32 `protobuf:"fixed32,27,rep,name=F_Float_repeated,json=fFloatRepeated" json:"F_Float_repeated,omitempty"`
- F_DoubleRepeated []float64 `protobuf:"fixed64,28,rep,name=F_Double_repeated,json=fDoubleRepeated" json:"F_Double_repeated,omitempty"`
- F_StringRepeated []string `protobuf:"bytes,29,rep,name=F_String_repeated,json=fStringRepeated" json:"F_String_repeated,omitempty"`
- F_BytesRepeated [][]byte `protobuf:"bytes,201,rep,name=F_Bytes_repeated,json=fBytesRepeated" json:"F_Bytes_repeated,omitempty"`
- F_Sint32Repeated []int32 `protobuf:"zigzag32,202,rep,name=F_Sint32_repeated,json=fSint32Repeated" json:"F_Sint32_repeated,omitempty"`
- F_Sint64Repeated []int64 `protobuf:"zigzag64,203,rep,name=F_Sint64_repeated,json=fSint64Repeated" json:"F_Sint64_repeated,omitempty"`
- // Optional fields of all basic types
- F_BoolOptional *bool `protobuf:"varint,30,opt,name=F_Bool_optional,json=fBoolOptional" json:"F_Bool_optional,omitempty"`
- F_Int32Optional *int32 `protobuf:"varint,31,opt,name=F_Int32_optional,json=fInt32Optional" json:"F_Int32_optional,omitempty"`
- F_Int64Optional *int64 `protobuf:"varint,32,opt,name=F_Int64_optional,json=fInt64Optional" json:"F_Int64_optional,omitempty"`
- F_Fixed32Optional *uint32 `protobuf:"fixed32,33,opt,name=F_Fixed32_optional,json=fFixed32Optional" json:"F_Fixed32_optional,omitempty"`
- F_Fixed64Optional *uint64 `protobuf:"fixed64,34,opt,name=F_Fixed64_optional,json=fFixed64Optional" json:"F_Fixed64_optional,omitempty"`
- F_Uint32Optional *uint32 `protobuf:"varint,35,opt,name=F_Uint32_optional,json=fUint32Optional" json:"F_Uint32_optional,omitempty"`
- F_Uint64Optional *uint64 `protobuf:"varint,36,opt,name=F_Uint64_optional,json=fUint64Optional" json:"F_Uint64_optional,omitempty"`
- F_FloatOptional *float32 `protobuf:"fixed32,37,opt,name=F_Float_optional,json=fFloatOptional" json:"F_Float_optional,omitempty"`
- F_DoubleOptional *float64 `protobuf:"fixed64,38,opt,name=F_Double_optional,json=fDoubleOptional" json:"F_Double_optional,omitempty"`
- F_StringOptional *string `protobuf:"bytes,39,opt,name=F_String_optional,json=fStringOptional" json:"F_String_optional,omitempty"`
- F_BytesOptional []byte `protobuf:"bytes,301,opt,name=F_Bytes_optional,json=fBytesOptional" json:"F_Bytes_optional,omitempty"`
- F_Sint32Optional *int32 `protobuf:"zigzag32,302,opt,name=F_Sint32_optional,json=fSint32Optional" json:"F_Sint32_optional,omitempty"`
- F_Sint64Optional *int64 `protobuf:"zigzag64,303,opt,name=F_Sint64_optional,json=fSint64Optional" json:"F_Sint64_optional,omitempty"`
- // Default-valued fields of all basic types
- F_BoolDefaulted *bool `protobuf:"varint,40,opt,name=F_Bool_defaulted,json=fBoolDefaulted,def=1" json:"F_Bool_defaulted,omitempty"`
- F_Int32Defaulted *int32 `protobuf:"varint,41,opt,name=F_Int32_defaulted,json=fInt32Defaulted,def=32" json:"F_Int32_defaulted,omitempty"`
- F_Int64Defaulted *int64 `protobuf:"varint,42,opt,name=F_Int64_defaulted,json=fInt64Defaulted,def=64" json:"F_Int64_defaulted,omitempty"`
- F_Fixed32Defaulted *uint32 `protobuf:"fixed32,43,opt,name=F_Fixed32_defaulted,json=fFixed32Defaulted,def=320" json:"F_Fixed32_defaulted,omitempty"`
- F_Fixed64Defaulted *uint64 `protobuf:"fixed64,44,opt,name=F_Fixed64_defaulted,json=fFixed64Defaulted,def=640" json:"F_Fixed64_defaulted,omitempty"`
- F_Uint32Defaulted *uint32 `protobuf:"varint,45,opt,name=F_Uint32_defaulted,json=fUint32Defaulted,def=3200" json:"F_Uint32_defaulted,omitempty"`
- F_Uint64Defaulted *uint64 `protobuf:"varint,46,opt,name=F_Uint64_defaulted,json=fUint64Defaulted,def=6400" json:"F_Uint64_defaulted,omitempty"`
- F_FloatDefaulted *float32 `protobuf:"fixed32,47,opt,name=F_Float_defaulted,json=fFloatDefaulted,def=314159" json:"F_Float_defaulted,omitempty"`
- F_DoubleDefaulted *float64 `protobuf:"fixed64,48,opt,name=F_Double_defaulted,json=fDoubleDefaulted,def=271828" json:"F_Double_defaulted,omitempty"`
- F_StringDefaulted *string `protobuf:"bytes,49,opt,name=F_String_defaulted,json=fStringDefaulted,def=hello, \"world!\"\n" json:"F_String_defaulted,omitempty"`
- F_BytesDefaulted []byte `protobuf:"bytes,401,opt,name=F_Bytes_defaulted,json=fBytesDefaulted,def=Bignose" json:"F_Bytes_defaulted,omitempty"`
- F_Sint32Defaulted *int32 `protobuf:"zigzag32,402,opt,name=F_Sint32_defaulted,json=fSint32Defaulted,def=-32" json:"F_Sint32_defaulted,omitempty"`
- F_Sint64Defaulted *int64 `protobuf:"zigzag64,403,opt,name=F_Sint64_defaulted,json=fSint64Defaulted,def=-64" json:"F_Sint64_defaulted,omitempty"`
- // Packed repeated fields (no string or bytes).
- F_BoolRepeatedPacked []bool `protobuf:"varint,50,rep,packed,name=F_Bool_repeated_packed,json=fBoolRepeatedPacked" json:"F_Bool_repeated_packed,omitempty"`
- F_Int32RepeatedPacked []int32 `protobuf:"varint,51,rep,packed,name=F_Int32_repeated_packed,json=fInt32RepeatedPacked" json:"F_Int32_repeated_packed,omitempty"`
- F_Int64RepeatedPacked []int64 `protobuf:"varint,52,rep,packed,name=F_Int64_repeated_packed,json=fInt64RepeatedPacked" json:"F_Int64_repeated_packed,omitempty"`
- F_Fixed32RepeatedPacked []uint32 `protobuf:"fixed32,53,rep,packed,name=F_Fixed32_repeated_packed,json=fFixed32RepeatedPacked" json:"F_Fixed32_repeated_packed,omitempty"`
- F_Fixed64RepeatedPacked []uint64 `protobuf:"fixed64,54,rep,packed,name=F_Fixed64_repeated_packed,json=fFixed64RepeatedPacked" json:"F_Fixed64_repeated_packed,omitempty"`
- F_Uint32RepeatedPacked []uint32 `protobuf:"varint,55,rep,packed,name=F_Uint32_repeated_packed,json=fUint32RepeatedPacked" json:"F_Uint32_repeated_packed,omitempty"`
- F_Uint64RepeatedPacked []uint64 `protobuf:"varint,56,rep,packed,name=F_Uint64_repeated_packed,json=fUint64RepeatedPacked" json:"F_Uint64_repeated_packed,omitempty"`
- F_FloatRepeatedPacked []float32 `protobuf:"fixed32,57,rep,packed,name=F_Float_repeated_packed,json=fFloatRepeatedPacked" json:"F_Float_repeated_packed,omitempty"`
- F_DoubleRepeatedPacked []float64 `protobuf:"fixed64,58,rep,packed,name=F_Double_repeated_packed,json=fDoubleRepeatedPacked" json:"F_Double_repeated_packed,omitempty"`
- F_Sint32RepeatedPacked []int32 `protobuf:"zigzag32,502,rep,packed,name=F_Sint32_repeated_packed,json=fSint32RepeatedPacked" json:"F_Sint32_repeated_packed,omitempty"`
- F_Sint64RepeatedPacked []int64 `protobuf:"zigzag64,503,rep,packed,name=F_Sint64_repeated_packed,json=fSint64RepeatedPacked" json:"F_Sint64_repeated_packed,omitempty"`
- Requiredgroup *GoTest_RequiredGroup `protobuf:"group,70,req,name=RequiredGroup,json=requiredgroup" json:"requiredgroup,omitempty"`
- Repeatedgroup []*GoTest_RepeatedGroup `protobuf:"group,80,rep,name=RepeatedGroup,json=repeatedgroup" json:"repeatedgroup,omitempty"`
- Optionalgroup *GoTest_OptionalGroup `protobuf:"group,90,opt,name=OptionalGroup,json=optionalgroup" json:"optionalgroup,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *GoTest) Reset() { *m = GoTest{} }
-func (m *GoTest) String() string { return proto.CompactTextString(m) }
-func (*GoTest) ProtoMessage() {}
-func (*GoTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
-
-const Default_GoTest_F_BoolDefaulted bool = true
-const Default_GoTest_F_Int32Defaulted int32 = 32
-const Default_GoTest_F_Int64Defaulted int64 = 64
-const Default_GoTest_F_Fixed32Defaulted uint32 = 320
-const Default_GoTest_F_Fixed64Defaulted uint64 = 640
-const Default_GoTest_F_Uint32Defaulted uint32 = 3200
-const Default_GoTest_F_Uint64Defaulted uint64 = 6400
-const Default_GoTest_F_FloatDefaulted float32 = 314159
-const Default_GoTest_F_DoubleDefaulted float64 = 271828
-const Default_GoTest_F_StringDefaulted string = "hello, \"world!\"\n"
-
-var Default_GoTest_F_BytesDefaulted []byte = []byte("Bignose")
-
-const Default_GoTest_F_Sint32Defaulted int32 = -32
-const Default_GoTest_F_Sint64Defaulted int64 = -64
-
-func (m *GoTest) GetKind() GoTest_KIND {
- if m != nil && m.Kind != nil {
- return *m.Kind
- }
- return GoTest_VOID
-}
-
-func (m *GoTest) GetTable() string {
- if m != nil && m.Table != nil {
- return *m.Table
- }
- return ""
-}
-
-func (m *GoTest) GetParam() int32 {
- if m != nil && m.Param != nil {
- return *m.Param
- }
- return 0
-}
-
-func (m *GoTest) GetRequiredField() *GoTestField {
- if m != nil {
- return m.RequiredField
- }
- return nil
-}
-
-func (m *GoTest) GetRepeatedField() []*GoTestField {
- if m != nil {
- return m.RepeatedField
- }
- return nil
-}
-
-func (m *GoTest) GetOptionalField() *GoTestField {
- if m != nil {
- return m.OptionalField
- }
- return nil
-}
-
-func (m *GoTest) GetF_BoolRequired() bool {
- if m != nil && m.F_BoolRequired != nil {
- return *m.F_BoolRequired
- }
- return false
-}
-
-func (m *GoTest) GetF_Int32Required() int32 {
- if m != nil && m.F_Int32Required != nil {
- return *m.F_Int32Required
- }
- return 0
-}
-
-func (m *GoTest) GetF_Int64Required() int64 {
- if m != nil && m.F_Int64Required != nil {
- return *m.F_Int64Required
- }
- return 0
-}
-
-func (m *GoTest) GetF_Fixed32Required() uint32 {
- if m != nil && m.F_Fixed32Required != nil {
- return *m.F_Fixed32Required
- }
- return 0
-}
-
-func (m *GoTest) GetF_Fixed64Required() uint64 {
- if m != nil && m.F_Fixed64Required != nil {
- return *m.F_Fixed64Required
- }
- return 0
-}
-
-func (m *GoTest) GetF_Uint32Required() uint32 {
- if m != nil && m.F_Uint32Required != nil {
- return *m.F_Uint32Required
- }
- return 0
-}
-
-func (m *GoTest) GetF_Uint64Required() uint64 {
- if m != nil && m.F_Uint64Required != nil {
- return *m.F_Uint64Required
- }
- return 0
-}
-
-func (m *GoTest) GetF_FloatRequired() float32 {
- if m != nil && m.F_FloatRequired != nil {
- return *m.F_FloatRequired
- }
- return 0
-}
-
-func (m *GoTest) GetF_DoubleRequired() float64 {
- if m != nil && m.F_DoubleRequired != nil {
- return *m.F_DoubleRequired
- }
- return 0
-}
-
-func (m *GoTest) GetF_StringRequired() string {
- if m != nil && m.F_StringRequired != nil {
- return *m.F_StringRequired
- }
- return ""
-}
-
-func (m *GoTest) GetF_BytesRequired() []byte {
- if m != nil {
- return m.F_BytesRequired
- }
- return nil
-}
-
-func (m *GoTest) GetF_Sint32Required() int32 {
- if m != nil && m.F_Sint32Required != nil {
- return *m.F_Sint32Required
- }
- return 0
-}
-
-func (m *GoTest) GetF_Sint64Required() int64 {
- if m != nil && m.F_Sint64Required != nil {
- return *m.F_Sint64Required
- }
- return 0
-}
-
-func (m *GoTest) GetF_BoolRepeated() []bool {
- if m != nil {
- return m.F_BoolRepeated
- }
- return nil
-}
-
-func (m *GoTest) GetF_Int32Repeated() []int32 {
- if m != nil {
- return m.F_Int32Repeated
- }
- return nil
-}
-
-func (m *GoTest) GetF_Int64Repeated() []int64 {
- if m != nil {
- return m.F_Int64Repeated
- }
- return nil
-}
-
-func (m *GoTest) GetF_Fixed32Repeated() []uint32 {
- if m != nil {
- return m.F_Fixed32Repeated
- }
- return nil
-}
-
-func (m *GoTest) GetF_Fixed64Repeated() []uint64 {
- if m != nil {
- return m.F_Fixed64Repeated
- }
- return nil
-}
-
-func (m *GoTest) GetF_Uint32Repeated() []uint32 {
- if m != nil {
- return m.F_Uint32Repeated
- }
- return nil
-}
-
-func (m *GoTest) GetF_Uint64Repeated() []uint64 {
- if m != nil {
- return m.F_Uint64Repeated
- }
- return nil
-}
-
-func (m *GoTest) GetF_FloatRepeated() []float32 {
- if m != nil {
- return m.F_FloatRepeated
- }
- return nil
-}
-
-func (m *GoTest) GetF_DoubleRepeated() []float64 {
- if m != nil {
- return m.F_DoubleRepeated
- }
- return nil
-}
-
-func (m *GoTest) GetF_StringRepeated() []string {
- if m != nil {
- return m.F_StringRepeated
- }
- return nil
-}
-
-func (m *GoTest) GetF_BytesRepeated() [][]byte {
- if m != nil {
- return m.F_BytesRepeated
- }
- return nil
-}
-
-func (m *GoTest) GetF_Sint32Repeated() []int32 {
- if m != nil {
- return m.F_Sint32Repeated
- }
- return nil
-}
-
-func (m *GoTest) GetF_Sint64Repeated() []int64 {
- if m != nil {
- return m.F_Sint64Repeated
- }
- return nil
-}
-
-func (m *GoTest) GetF_BoolOptional() bool {
- if m != nil && m.F_BoolOptional != nil {
- return *m.F_BoolOptional
- }
- return false
-}
-
-func (m *GoTest) GetF_Int32Optional() int32 {
- if m != nil && m.F_Int32Optional != nil {
- return *m.F_Int32Optional
- }
- return 0
-}
-
-func (m *GoTest) GetF_Int64Optional() int64 {
- if m != nil && m.F_Int64Optional != nil {
- return *m.F_Int64Optional
- }
- return 0
-}
-
-func (m *GoTest) GetF_Fixed32Optional() uint32 {
- if m != nil && m.F_Fixed32Optional != nil {
- return *m.F_Fixed32Optional
- }
- return 0
-}
-
-func (m *GoTest) GetF_Fixed64Optional() uint64 {
- if m != nil && m.F_Fixed64Optional != nil {
- return *m.F_Fixed64Optional
- }
- return 0
-}
-
-func (m *GoTest) GetF_Uint32Optional() uint32 {
- if m != nil && m.F_Uint32Optional != nil {
- return *m.F_Uint32Optional
- }
- return 0
-}
-
-func (m *GoTest) GetF_Uint64Optional() uint64 {
- if m != nil && m.F_Uint64Optional != nil {
- return *m.F_Uint64Optional
- }
- return 0
-}
-
-func (m *GoTest) GetF_FloatOptional() float32 {
- if m != nil && m.F_FloatOptional != nil {
- return *m.F_FloatOptional
- }
- return 0
-}
-
-func (m *GoTest) GetF_DoubleOptional() float64 {
- if m != nil && m.F_DoubleOptional != nil {
- return *m.F_DoubleOptional
- }
- return 0
-}
-
-func (m *GoTest) GetF_StringOptional() string {
- if m != nil && m.F_StringOptional != nil {
- return *m.F_StringOptional
- }
- return ""
-}
-
-func (m *GoTest) GetF_BytesOptional() []byte {
- if m != nil {
- return m.F_BytesOptional
- }
- return nil
-}
-
-func (m *GoTest) GetF_Sint32Optional() int32 {
- if m != nil && m.F_Sint32Optional != nil {
- return *m.F_Sint32Optional
- }
- return 0
-}
-
-func (m *GoTest) GetF_Sint64Optional() int64 {
- if m != nil && m.F_Sint64Optional != nil {
- return *m.F_Sint64Optional
- }
- return 0
-}
-
-func (m *GoTest) GetF_BoolDefaulted() bool {
- if m != nil && m.F_BoolDefaulted != nil {
- return *m.F_BoolDefaulted
- }
- return Default_GoTest_F_BoolDefaulted
-}
-
-func (m *GoTest) GetF_Int32Defaulted() int32 {
- if m != nil && m.F_Int32Defaulted != nil {
- return *m.F_Int32Defaulted
- }
- return Default_GoTest_F_Int32Defaulted
-}
-
-func (m *GoTest) GetF_Int64Defaulted() int64 {
- if m != nil && m.F_Int64Defaulted != nil {
- return *m.F_Int64Defaulted
- }
- return Default_GoTest_F_Int64Defaulted
-}
-
-func (m *GoTest) GetF_Fixed32Defaulted() uint32 {
- if m != nil && m.F_Fixed32Defaulted != nil {
- return *m.F_Fixed32Defaulted
- }
- return Default_GoTest_F_Fixed32Defaulted
-}
-
-func (m *GoTest) GetF_Fixed64Defaulted() uint64 {
- if m != nil && m.F_Fixed64Defaulted != nil {
- return *m.F_Fixed64Defaulted
- }
- return Default_GoTest_F_Fixed64Defaulted
-}
-
-func (m *GoTest) GetF_Uint32Defaulted() uint32 {
- if m != nil && m.F_Uint32Defaulted != nil {
- return *m.F_Uint32Defaulted
- }
- return Default_GoTest_F_Uint32Defaulted
-}
-
-func (m *GoTest) GetF_Uint64Defaulted() uint64 {
- if m != nil && m.F_Uint64Defaulted != nil {
- return *m.F_Uint64Defaulted
- }
- return Default_GoTest_F_Uint64Defaulted
-}
-
-func (m *GoTest) GetF_FloatDefaulted() float32 {
- if m != nil && m.F_FloatDefaulted != nil {
- return *m.F_FloatDefaulted
- }
- return Default_GoTest_F_FloatDefaulted
-}
-
-func (m *GoTest) GetF_DoubleDefaulted() float64 {
- if m != nil && m.F_DoubleDefaulted != nil {
- return *m.F_DoubleDefaulted
- }
- return Default_GoTest_F_DoubleDefaulted
-}
-
-func (m *GoTest) GetF_StringDefaulted() string {
- if m != nil && m.F_StringDefaulted != nil {
- return *m.F_StringDefaulted
- }
- return Default_GoTest_F_StringDefaulted
-}
-
-func (m *GoTest) GetF_BytesDefaulted() []byte {
- if m != nil && m.F_BytesDefaulted != nil {
- return m.F_BytesDefaulted
- }
- return append([]byte(nil), Default_GoTest_F_BytesDefaulted...)
-}
-
-func (m *GoTest) GetF_Sint32Defaulted() int32 {
- if m != nil && m.F_Sint32Defaulted != nil {
- return *m.F_Sint32Defaulted
- }
- return Default_GoTest_F_Sint32Defaulted
-}
-
-func (m *GoTest) GetF_Sint64Defaulted() int64 {
- if m != nil && m.F_Sint64Defaulted != nil {
- return *m.F_Sint64Defaulted
- }
- return Default_GoTest_F_Sint64Defaulted
-}
-
-func (m *GoTest) GetF_BoolRepeatedPacked() []bool {
- if m != nil {
- return m.F_BoolRepeatedPacked
- }
- return nil
-}
-
-func (m *GoTest) GetF_Int32RepeatedPacked() []int32 {
- if m != nil {
- return m.F_Int32RepeatedPacked
- }
- return nil
-}
-
-func (m *GoTest) GetF_Int64RepeatedPacked() []int64 {
- if m != nil {
- return m.F_Int64RepeatedPacked
- }
- return nil
-}
-
-func (m *GoTest) GetF_Fixed32RepeatedPacked() []uint32 {
- if m != nil {
- return m.F_Fixed32RepeatedPacked
- }
- return nil
-}
-
-func (m *GoTest) GetF_Fixed64RepeatedPacked() []uint64 {
- if m != nil {
- return m.F_Fixed64RepeatedPacked
- }
- return nil
-}
-
-func (m *GoTest) GetF_Uint32RepeatedPacked() []uint32 {
- if m != nil {
- return m.F_Uint32RepeatedPacked
- }
- return nil
-}
-
-func (m *GoTest) GetF_Uint64RepeatedPacked() []uint64 {
- if m != nil {
- return m.F_Uint64RepeatedPacked
- }
- return nil
-}
-
-func (m *GoTest) GetF_FloatRepeatedPacked() []float32 {
- if m != nil {
- return m.F_FloatRepeatedPacked
- }
- return nil
-}
-
-func (m *GoTest) GetF_DoubleRepeatedPacked() []float64 {
- if m != nil {
- return m.F_DoubleRepeatedPacked
- }
- return nil
-}
-
-func (m *GoTest) GetF_Sint32RepeatedPacked() []int32 {
- if m != nil {
- return m.F_Sint32RepeatedPacked
- }
- return nil
-}
-
-func (m *GoTest) GetF_Sint64RepeatedPacked() []int64 {
- if m != nil {
- return m.F_Sint64RepeatedPacked
- }
- return nil
-}
-
-func (m *GoTest) GetRequiredgroup() *GoTest_RequiredGroup {
- if m != nil {
- return m.Requiredgroup
- }
- return nil
-}
-
-func (m *GoTest) GetRepeatedgroup() []*GoTest_RepeatedGroup {
- if m != nil {
- return m.Repeatedgroup
- }
- return nil
-}
-
-func (m *GoTest) GetOptionalgroup() *GoTest_OptionalGroup {
- if m != nil {
- return m.Optionalgroup
- }
- return nil
-}
-
-// Required, repeated, and optional groups.
-type GoTest_RequiredGroup struct {
- RequiredField *string `protobuf:"bytes,71,req,name=RequiredField,json=requiredField" json:"RequiredField,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *GoTest_RequiredGroup) Reset() { *m = GoTest_RequiredGroup{} }
-func (m *GoTest_RequiredGroup) String() string { return proto.CompactTextString(m) }
-func (*GoTest_RequiredGroup) ProtoMessage() {}
-func (*GoTest_RequiredGroup) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} }
-
-func (m *GoTest_RequiredGroup) GetRequiredField() string {
- if m != nil && m.RequiredField != nil {
- return *m.RequiredField
- }
- return ""
-}
-
-type GoTest_RepeatedGroup struct {
- RequiredField *string `protobuf:"bytes,81,req,name=RequiredField,json=requiredField" json:"RequiredField,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *GoTest_RepeatedGroup) Reset() { *m = GoTest_RepeatedGroup{} }
-func (m *GoTest_RepeatedGroup) String() string { return proto.CompactTextString(m) }
-func (*GoTest_RepeatedGroup) ProtoMessage() {}
-func (*GoTest_RepeatedGroup) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 1} }
-
-func (m *GoTest_RepeatedGroup) GetRequiredField() string {
- if m != nil && m.RequiredField != nil {
- return *m.RequiredField
- }
- return ""
-}
-
-type GoTest_OptionalGroup struct {
- RequiredField *string `protobuf:"bytes,91,req,name=RequiredField,json=requiredField" json:"RequiredField,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *GoTest_OptionalGroup) Reset() { *m = GoTest_OptionalGroup{} }
-func (m *GoTest_OptionalGroup) String() string { return proto.CompactTextString(m) }
-func (*GoTest_OptionalGroup) ProtoMessage() {}
-func (*GoTest_OptionalGroup) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 2} }
-
-func (m *GoTest_OptionalGroup) GetRequiredField() string {
- if m != nil && m.RequiredField != nil {
- return *m.RequiredField
- }
- return ""
-}
-
-// For testing a group containing a required field.
-type GoTestRequiredGroupField struct {
- Group *GoTestRequiredGroupField_Group `protobuf:"group,1,req,name=Group,json=group" json:"group,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *GoTestRequiredGroupField) Reset() { *m = GoTestRequiredGroupField{} }
-func (m *GoTestRequiredGroupField) String() string { return proto.CompactTextString(m) }
-func (*GoTestRequiredGroupField) ProtoMessage() {}
-func (*GoTestRequiredGroupField) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
-
-func (m *GoTestRequiredGroupField) GetGroup() *GoTestRequiredGroupField_Group {
- if m != nil {
- return m.Group
- }
- return nil
-}
-
-type GoTestRequiredGroupField_Group struct {
- Field *int32 `protobuf:"varint,2,req,name=Field,json=field" json:"Field,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *GoTestRequiredGroupField_Group) Reset() { *m = GoTestRequiredGroupField_Group{} }
-func (m *GoTestRequiredGroupField_Group) String() string { return proto.CompactTextString(m) }
-func (*GoTestRequiredGroupField_Group) ProtoMessage() {}
-func (*GoTestRequiredGroupField_Group) Descriptor() ([]byte, []int) {
- return fileDescriptor0, []int{3, 0}
-}
-
-func (m *GoTestRequiredGroupField_Group) GetField() int32 {
- if m != nil && m.Field != nil {
- return *m.Field
- }
- return 0
-}
-
-// For testing skipping of unrecognized fields.
-// Numbers are all big, larger than tag numbers in GoTestField,
-// the message used in the corresponding test.
-type GoSkipTest struct {
- SkipInt32 *int32 `protobuf:"varint,11,req,name=skip_int32,json=skipInt32" json:"skip_int32,omitempty"`
- SkipFixed32 *uint32 `protobuf:"fixed32,12,req,name=skip_fixed32,json=skipFixed32" json:"skip_fixed32,omitempty"`
- SkipFixed64 *uint64 `protobuf:"fixed64,13,req,name=skip_fixed64,json=skipFixed64" json:"skip_fixed64,omitempty"`
- SkipString *string `protobuf:"bytes,14,req,name=skip_string,json=skipString" json:"skip_string,omitempty"`
- Skipgroup *GoSkipTest_SkipGroup `protobuf:"group,15,req,name=SkipGroup,json=skipgroup" json:"skipgroup,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *GoSkipTest) Reset() { *m = GoSkipTest{} }
-func (m *GoSkipTest) String() string { return proto.CompactTextString(m) }
-func (*GoSkipTest) ProtoMessage() {}
-func (*GoSkipTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
-
-func (m *GoSkipTest) GetSkipInt32() int32 {
- if m != nil && m.SkipInt32 != nil {
- return *m.SkipInt32
- }
- return 0
-}
-
-func (m *GoSkipTest) GetSkipFixed32() uint32 {
- if m != nil && m.SkipFixed32 != nil {
- return *m.SkipFixed32
- }
- return 0
-}
-
-func (m *GoSkipTest) GetSkipFixed64() uint64 {
- if m != nil && m.SkipFixed64 != nil {
- return *m.SkipFixed64
- }
- return 0
-}
-
-func (m *GoSkipTest) GetSkipString() string {
- if m != nil && m.SkipString != nil {
- return *m.SkipString
- }
- return ""
-}
-
-func (m *GoSkipTest) GetSkipgroup() *GoSkipTest_SkipGroup {
- if m != nil {
- return m.Skipgroup
- }
- return nil
-}
-
-type GoSkipTest_SkipGroup struct {
- GroupInt32 *int32 `protobuf:"varint,16,req,name=group_int32,json=groupInt32" json:"group_int32,omitempty"`
- GroupString *string `protobuf:"bytes,17,req,name=group_string,json=groupString" json:"group_string,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *GoSkipTest_SkipGroup) Reset() { *m = GoSkipTest_SkipGroup{} }
-func (m *GoSkipTest_SkipGroup) String() string { return proto.CompactTextString(m) }
-func (*GoSkipTest_SkipGroup) ProtoMessage() {}
-func (*GoSkipTest_SkipGroup) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4, 0} }
-
-func (m *GoSkipTest_SkipGroup) GetGroupInt32() int32 {
- if m != nil && m.GroupInt32 != nil {
- return *m.GroupInt32
- }
- return 0
-}
-
-func (m *GoSkipTest_SkipGroup) GetGroupString() string {
- if m != nil && m.GroupString != nil {
- return *m.GroupString
- }
- return ""
-}
-
-// For testing packed/non-packed decoder switching.
-// A serialized instance of one should be deserializable as the other.
-type NonPackedTest struct {
- A []int32 `protobuf:"varint,1,rep,name=a" json:"a,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *NonPackedTest) Reset() { *m = NonPackedTest{} }
-func (m *NonPackedTest) String() string { return proto.CompactTextString(m) }
-func (*NonPackedTest) ProtoMessage() {}
-func (*NonPackedTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
-
-func (m *NonPackedTest) GetA() []int32 {
- if m != nil {
- return m.A
- }
- return nil
-}
-
-type PackedTest struct {
- B []int32 `protobuf:"varint,1,rep,packed,name=b" json:"b,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *PackedTest) Reset() { *m = PackedTest{} }
-func (m *PackedTest) String() string { return proto.CompactTextString(m) }
-func (*PackedTest) ProtoMessage() {}
-func (*PackedTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
-
-func (m *PackedTest) GetB() []int32 {
- if m != nil {
- return m.B
- }
- return nil
-}
-
-type MaxTag struct {
- // Maximum possible tag number.
- LastField *string `protobuf:"bytes,536870911,opt,name=last_field,json=lastField" json:"last_field,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *MaxTag) Reset() { *m = MaxTag{} }
-func (m *MaxTag) String() string { return proto.CompactTextString(m) }
-func (*MaxTag) ProtoMessage() {}
-func (*MaxTag) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
-
-func (m *MaxTag) GetLastField() string {
- if m != nil && m.LastField != nil {
- return *m.LastField
- }
- return ""
-}
-
-type OldMessage struct {
- Nested *OldMessage_Nested `protobuf:"bytes,1,opt,name=nested" json:"nested,omitempty"`
- Num *int32 `protobuf:"varint,2,opt,name=num" json:"num,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *OldMessage) Reset() { *m = OldMessage{} }
-func (m *OldMessage) String() string { return proto.CompactTextString(m) }
-func (*OldMessage) ProtoMessage() {}
-func (*OldMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
-
-func (m *OldMessage) GetNested() *OldMessage_Nested {
- if m != nil {
- return m.Nested
- }
- return nil
-}
-
-func (m *OldMessage) GetNum() int32 {
- if m != nil && m.Num != nil {
- return *m.Num
- }
- return 0
-}
-
-type OldMessage_Nested struct {
- Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *OldMessage_Nested) Reset() { *m = OldMessage_Nested{} }
-func (m *OldMessage_Nested) String() string { return proto.CompactTextString(m) }
-func (*OldMessage_Nested) ProtoMessage() {}
-func (*OldMessage_Nested) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8, 0} }
-
-func (m *OldMessage_Nested) GetName() string {
- if m != nil && m.Name != nil {
- return *m.Name
- }
- return ""
-}
-
-// NewMessage is wire compatible with OldMessage;
-// imagine it as a future version.
-type NewMessage struct {
- Nested *NewMessage_Nested `protobuf:"bytes,1,opt,name=nested" json:"nested,omitempty"`
- // This is an int32 in OldMessage.
- Num *int64 `protobuf:"varint,2,opt,name=num" json:"num,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *NewMessage) Reset() { *m = NewMessage{} }
-func (m *NewMessage) String() string { return proto.CompactTextString(m) }
-func (*NewMessage) ProtoMessage() {}
-func (*NewMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }
-
-func (m *NewMessage) GetNested() *NewMessage_Nested {
- if m != nil {
- return m.Nested
- }
- return nil
-}
-
-func (m *NewMessage) GetNum() int64 {
- if m != nil && m.Num != nil {
- return *m.Num
- }
- return 0
-}
-
-type NewMessage_Nested struct {
- Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
- FoodGroup *string `protobuf:"bytes,2,opt,name=food_group,json=foodGroup" json:"food_group,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *NewMessage_Nested) Reset() { *m = NewMessage_Nested{} }
-func (m *NewMessage_Nested) String() string { return proto.CompactTextString(m) }
-func (*NewMessage_Nested) ProtoMessage() {}
-func (*NewMessage_Nested) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9, 0} }
-
-func (m *NewMessage_Nested) GetName() string {
- if m != nil && m.Name != nil {
- return *m.Name
- }
- return ""
-}
-
-func (m *NewMessage_Nested) GetFoodGroup() string {
- if m != nil && m.FoodGroup != nil {
- return *m.FoodGroup
- }
- return ""
-}
-
-type InnerMessage struct {
- Host *string `protobuf:"bytes,1,req,name=host" json:"host,omitempty"`
- Port *int32 `protobuf:"varint,2,opt,name=port,def=4000" json:"port,omitempty"`
- Connected *bool `protobuf:"varint,3,opt,name=connected" json:"connected,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *InnerMessage) Reset() { *m = InnerMessage{} }
-func (m *InnerMessage) String() string { return proto.CompactTextString(m) }
-func (*InnerMessage) ProtoMessage() {}
-func (*InnerMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} }
-
-const Default_InnerMessage_Port int32 = 4000
-
-func (m *InnerMessage) GetHost() string {
- if m != nil && m.Host != nil {
- return *m.Host
- }
- return ""
-}
-
-func (m *InnerMessage) GetPort() int32 {
- if m != nil && m.Port != nil {
- return *m.Port
- }
- return Default_InnerMessage_Port
-}
-
-func (m *InnerMessage) GetConnected() bool {
- if m != nil && m.Connected != nil {
- return *m.Connected
- }
- return false
-}
-
-type OtherMessage struct {
- Key *int64 `protobuf:"varint,1,opt,name=key" json:"key,omitempty"`
- Value []byte `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
- Weight *float32 `protobuf:"fixed32,3,opt,name=weight" json:"weight,omitempty"`
- Inner *InnerMessage `protobuf:"bytes,4,opt,name=inner" json:"inner,omitempty"`
- proto.XXX_InternalExtensions `json:"-"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *OtherMessage) Reset() { *m = OtherMessage{} }
-func (m *OtherMessage) String() string { return proto.CompactTextString(m) }
-func (*OtherMessage) ProtoMessage() {}
-func (*OtherMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} }
-
-var extRange_OtherMessage = []proto.ExtensionRange{
- {100, 536870911},
-}
-
-func (*OtherMessage) ExtensionRangeArray() []proto.ExtensionRange {
- return extRange_OtherMessage
-}
-
-func (m *OtherMessage) GetKey() int64 {
- if m != nil && m.Key != nil {
- return *m.Key
- }
- return 0
-}
-
-func (m *OtherMessage) GetValue() []byte {
- if m != nil {
- return m.Value
- }
- return nil
-}
-
-func (m *OtherMessage) GetWeight() float32 {
- if m != nil && m.Weight != nil {
- return *m.Weight
- }
- return 0
-}
-
-func (m *OtherMessage) GetInner() *InnerMessage {
- if m != nil {
- return m.Inner
- }
- return nil
-}
-
-type RequiredInnerMessage struct {
- LeoFinallyWonAnOscar *InnerMessage `protobuf:"bytes,1,req,name=leo_finally_won_an_oscar,json=leoFinallyWonAnOscar" json:"leo_finally_won_an_oscar,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *RequiredInnerMessage) Reset() { *m = RequiredInnerMessage{} }
-func (m *RequiredInnerMessage) String() string { return proto.CompactTextString(m) }
-func (*RequiredInnerMessage) ProtoMessage() {}
-func (*RequiredInnerMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} }
-
-func (m *RequiredInnerMessage) GetLeoFinallyWonAnOscar() *InnerMessage {
- if m != nil {
- return m.LeoFinallyWonAnOscar
- }
- return nil
-}
-
-type MyMessage struct {
- Count *int32 `protobuf:"varint,1,req,name=count" json:"count,omitempty"`
- Name *string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"`
- Quote *string `protobuf:"bytes,3,opt,name=quote" json:"quote,omitempty"`
- Pet []string `protobuf:"bytes,4,rep,name=pet" json:"pet,omitempty"`
- Inner *InnerMessage `protobuf:"bytes,5,opt,name=inner" json:"inner,omitempty"`
- Others []*OtherMessage `protobuf:"bytes,6,rep,name=others" json:"others,omitempty"`
- WeMustGoDeeper *RequiredInnerMessage `protobuf:"bytes,13,opt,name=we_must_go_deeper,json=weMustGoDeeper" json:"we_must_go_deeper,omitempty"`
- RepInner []*InnerMessage `protobuf:"bytes,12,rep,name=rep_inner,json=repInner" json:"rep_inner,omitempty"`
- Bikeshed *MyMessage_Color `protobuf:"varint,7,opt,name=bikeshed,enum=testdata.MyMessage_Color" json:"bikeshed,omitempty"`
- Somegroup *MyMessage_SomeGroup `protobuf:"group,8,opt,name=SomeGroup,json=somegroup" json:"somegroup,omitempty"`
- // This field becomes [][]byte in the generated code.
- RepBytes [][]byte `protobuf:"bytes,10,rep,name=rep_bytes,json=repBytes" json:"rep_bytes,omitempty"`
- Bigfloat *float64 `protobuf:"fixed64,11,opt,name=bigfloat" json:"bigfloat,omitempty"`
- proto.XXX_InternalExtensions `json:"-"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *MyMessage) Reset() { *m = MyMessage{} }
-func (m *MyMessage) String() string { return proto.CompactTextString(m) }
-func (*MyMessage) ProtoMessage() {}
-func (*MyMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} }
-
-var extRange_MyMessage = []proto.ExtensionRange{
- {100, 536870911},
-}
-
-func (*MyMessage) ExtensionRangeArray() []proto.ExtensionRange {
- return extRange_MyMessage
-}
-
-func (m *MyMessage) GetCount() int32 {
- if m != nil && m.Count != nil {
- return *m.Count
- }
- return 0
-}
-
-func (m *MyMessage) GetName() string {
- if m != nil && m.Name != nil {
- return *m.Name
- }
- return ""
-}
-
-func (m *MyMessage) GetQuote() string {
- if m != nil && m.Quote != nil {
- return *m.Quote
- }
- return ""
-}
-
-func (m *MyMessage) GetPet() []string {
- if m != nil {
- return m.Pet
- }
- return nil
-}
-
-func (m *MyMessage) GetInner() *InnerMessage {
- if m != nil {
- return m.Inner
- }
- return nil
-}
-
-func (m *MyMessage) GetOthers() []*OtherMessage {
- if m != nil {
- return m.Others
- }
- return nil
-}
-
-func (m *MyMessage) GetWeMustGoDeeper() *RequiredInnerMessage {
- if m != nil {
- return m.WeMustGoDeeper
- }
- return nil
-}
-
-func (m *MyMessage) GetRepInner() []*InnerMessage {
- if m != nil {
- return m.RepInner
- }
- return nil
-}
-
-func (m *MyMessage) GetBikeshed() MyMessage_Color {
- if m != nil && m.Bikeshed != nil {
- return *m.Bikeshed
- }
- return MyMessage_RED
-}
-
-func (m *MyMessage) GetSomegroup() *MyMessage_SomeGroup {
- if m != nil {
- return m.Somegroup
- }
- return nil
-}
-
-func (m *MyMessage) GetRepBytes() [][]byte {
- if m != nil {
- return m.RepBytes
- }
- return nil
-}
-
-func (m *MyMessage) GetBigfloat() float64 {
- if m != nil && m.Bigfloat != nil {
- return *m.Bigfloat
- }
- return 0
-}
-
-type MyMessage_SomeGroup struct {
- GroupField *int32 `protobuf:"varint,9,opt,name=group_field,json=groupField" json:"group_field,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *MyMessage_SomeGroup) Reset() { *m = MyMessage_SomeGroup{} }
-func (m *MyMessage_SomeGroup) String() string { return proto.CompactTextString(m) }
-func (*MyMessage_SomeGroup) ProtoMessage() {}
-func (*MyMessage_SomeGroup) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13, 0} }
-
-func (m *MyMessage_SomeGroup) GetGroupField() int32 {
- if m != nil && m.GroupField != nil {
- return *m.GroupField
- }
- return 0
-}
-
-type Ext struct {
- Data *string `protobuf:"bytes,1,opt,name=data" json:"data,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *Ext) Reset() { *m = Ext{} }
-func (m *Ext) String() string { return proto.CompactTextString(m) }
-func (*Ext) ProtoMessage() {}
-func (*Ext) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} }
-
-func (m *Ext) GetData() string {
- if m != nil && m.Data != nil {
- return *m.Data
- }
- return ""
-}
-
-var E_Ext_More = &proto.ExtensionDesc{
- ExtendedType: (*MyMessage)(nil),
- ExtensionType: (*Ext)(nil),
- Field: 103,
- Name: "testdata.Ext.more",
- Tag: "bytes,103,opt,name=more",
-}
-
-var E_Ext_Text = &proto.ExtensionDesc{
- ExtendedType: (*MyMessage)(nil),
- ExtensionType: (*string)(nil),
- Field: 104,
- Name: "testdata.Ext.text",
- Tag: "bytes,104,opt,name=text",
-}
-
-var E_Ext_Number = &proto.ExtensionDesc{
- ExtendedType: (*MyMessage)(nil),
- ExtensionType: (*int32)(nil),
- Field: 105,
- Name: "testdata.Ext.number",
- Tag: "varint,105,opt,name=number",
-}
-
-type ComplexExtension struct {
- First *int32 `protobuf:"varint,1,opt,name=first" json:"first,omitempty"`
- Second *int32 `protobuf:"varint,2,opt,name=second" json:"second,omitempty"`
- Third []int32 `protobuf:"varint,3,rep,name=third" json:"third,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *ComplexExtension) Reset() { *m = ComplexExtension{} }
-func (m *ComplexExtension) String() string { return proto.CompactTextString(m) }
-func (*ComplexExtension) ProtoMessage() {}
-func (*ComplexExtension) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} }
-
-func (m *ComplexExtension) GetFirst() int32 {
- if m != nil && m.First != nil {
- return *m.First
- }
- return 0
-}
-
-func (m *ComplexExtension) GetSecond() int32 {
- if m != nil && m.Second != nil {
- return *m.Second
- }
- return 0
-}
-
-func (m *ComplexExtension) GetThird() []int32 {
- if m != nil {
- return m.Third
- }
- return nil
-}
-
-type DefaultsMessage struct {
- proto.XXX_InternalExtensions `json:"-"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *DefaultsMessage) Reset() { *m = DefaultsMessage{} }
-func (m *DefaultsMessage) String() string { return proto.CompactTextString(m) }
-func (*DefaultsMessage) ProtoMessage() {}
-func (*DefaultsMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} }
-
-var extRange_DefaultsMessage = []proto.ExtensionRange{
- {100, 536870911},
-}
-
-func (*DefaultsMessage) ExtensionRangeArray() []proto.ExtensionRange {
- return extRange_DefaultsMessage
-}
-
-type MyMessageSet struct {
- proto.XXX_InternalExtensions `json:"-"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *MyMessageSet) Reset() { *m = MyMessageSet{} }
-func (m *MyMessageSet) String() string { return proto.CompactTextString(m) }
-func (*MyMessageSet) ProtoMessage() {}
-func (*MyMessageSet) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} }
-
-func (m *MyMessageSet) Marshal() ([]byte, error) {
- return proto.MarshalMessageSet(&m.XXX_InternalExtensions)
-}
-func (m *MyMessageSet) Unmarshal(buf []byte) error {
- return proto.UnmarshalMessageSet(buf, &m.XXX_InternalExtensions)
-}
-func (m *MyMessageSet) MarshalJSON() ([]byte, error) {
- return proto.MarshalMessageSetJSON(&m.XXX_InternalExtensions)
-}
-func (m *MyMessageSet) UnmarshalJSON(buf []byte) error {
- return proto.UnmarshalMessageSetJSON(buf, &m.XXX_InternalExtensions)
-}
-
-// ensure MyMessageSet satisfies proto.Marshaler and proto.Unmarshaler
-var _ proto.Marshaler = (*MyMessageSet)(nil)
-var _ proto.Unmarshaler = (*MyMessageSet)(nil)
-
-var extRange_MyMessageSet = []proto.ExtensionRange{
- {100, 2147483646},
-}
-
-func (*MyMessageSet) ExtensionRangeArray() []proto.ExtensionRange {
- return extRange_MyMessageSet
-}
-
-type Empty struct {
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *Empty) Reset() { *m = Empty{} }
-func (m *Empty) String() string { return proto.CompactTextString(m) }
-func (*Empty) ProtoMessage() {}
-func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} }
-
-type MessageList struct {
- Message []*MessageList_Message `protobuf:"group,1,rep,name=Message,json=message" json:"message,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *MessageList) Reset() { *m = MessageList{} }
-func (m *MessageList) String() string { return proto.CompactTextString(m) }
-func (*MessageList) ProtoMessage() {}
-func (*MessageList) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} }
-
-func (m *MessageList) GetMessage() []*MessageList_Message {
- if m != nil {
- return m.Message
- }
- return nil
-}
-
-type MessageList_Message struct {
- Name *string `protobuf:"bytes,2,req,name=name" json:"name,omitempty"`
- Count *int32 `protobuf:"varint,3,req,name=count" json:"count,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *MessageList_Message) Reset() { *m = MessageList_Message{} }
-func (m *MessageList_Message) String() string { return proto.CompactTextString(m) }
-func (*MessageList_Message) ProtoMessage() {}
-func (*MessageList_Message) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19, 0} }
-
-func (m *MessageList_Message) GetName() string {
- if m != nil && m.Name != nil {
- return *m.Name
- }
- return ""
-}
-
-func (m *MessageList_Message) GetCount() int32 {
- if m != nil && m.Count != nil {
- return *m.Count
- }
- return 0
-}
-
-type Strings struct {
- StringField *string `protobuf:"bytes,1,opt,name=string_field,json=stringField" json:"string_field,omitempty"`
- BytesField []byte `protobuf:"bytes,2,opt,name=bytes_field,json=bytesField" json:"bytes_field,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *Strings) Reset() { *m = Strings{} }
-func (m *Strings) String() string { return proto.CompactTextString(m) }
-func (*Strings) ProtoMessage() {}
-func (*Strings) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} }
-
-func (m *Strings) GetStringField() string {
- if m != nil && m.StringField != nil {
- return *m.StringField
- }
- return ""
-}
-
-func (m *Strings) GetBytesField() []byte {
- if m != nil {
- return m.BytesField
- }
- return nil
-}
-
-type Defaults struct {
- // Default-valued fields of all basic types.
- // Same as GoTest, but copied here to make testing easier.
- F_Bool *bool `protobuf:"varint,1,opt,name=F_Bool,json=fBool,def=1" json:"F_Bool,omitempty"`
- F_Int32 *int32 `protobuf:"varint,2,opt,name=F_Int32,json=fInt32,def=32" json:"F_Int32,omitempty"`
- F_Int64 *int64 `protobuf:"varint,3,opt,name=F_Int64,json=fInt64,def=64" json:"F_Int64,omitempty"`
- F_Fixed32 *uint32 `protobuf:"fixed32,4,opt,name=F_Fixed32,json=fFixed32,def=320" json:"F_Fixed32,omitempty"`
- F_Fixed64 *uint64 `protobuf:"fixed64,5,opt,name=F_Fixed64,json=fFixed64,def=640" json:"F_Fixed64,omitempty"`
- F_Uint32 *uint32 `protobuf:"varint,6,opt,name=F_Uint32,json=fUint32,def=3200" json:"F_Uint32,omitempty"`
- F_Uint64 *uint64 `protobuf:"varint,7,opt,name=F_Uint64,json=fUint64,def=6400" json:"F_Uint64,omitempty"`
- F_Float *float32 `protobuf:"fixed32,8,opt,name=F_Float,json=fFloat,def=314159" json:"F_Float,omitempty"`
- F_Double *float64 `protobuf:"fixed64,9,opt,name=F_Double,json=fDouble,def=271828" json:"F_Double,omitempty"`
- F_String *string `protobuf:"bytes,10,opt,name=F_String,json=fString,def=hello, \"world!\"\n" json:"F_String,omitempty"`
- F_Bytes []byte `protobuf:"bytes,11,opt,name=F_Bytes,json=fBytes,def=Bignose" json:"F_Bytes,omitempty"`
- F_Sint32 *int32 `protobuf:"zigzag32,12,opt,name=F_Sint32,json=fSint32,def=-32" json:"F_Sint32,omitempty"`
- F_Sint64 *int64 `protobuf:"zigzag64,13,opt,name=F_Sint64,json=fSint64,def=-64" json:"F_Sint64,omitempty"`
- F_Enum *Defaults_Color `protobuf:"varint,14,opt,name=F_Enum,json=fEnum,enum=testdata.Defaults_Color,def=1" json:"F_Enum,omitempty"`
- // More fields with crazy defaults.
- F_Pinf *float32 `protobuf:"fixed32,15,opt,name=F_Pinf,json=fPinf,def=inf" json:"F_Pinf,omitempty"`
- F_Ninf *float32 `protobuf:"fixed32,16,opt,name=F_Ninf,json=fNinf,def=-inf" json:"F_Ninf,omitempty"`
- F_Nan *float32 `protobuf:"fixed32,17,opt,name=F_Nan,json=fNan,def=nan" json:"F_Nan,omitempty"`
- // Sub-message.
- Sub *SubDefaults `protobuf:"bytes,18,opt,name=sub" json:"sub,omitempty"`
- // Redundant but explicit defaults.
- StrZero *string `protobuf:"bytes,19,opt,name=str_zero,json=strZero,def=" json:"str_zero,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *Defaults) Reset() { *m = Defaults{} }
-func (m *Defaults) String() string { return proto.CompactTextString(m) }
-func (*Defaults) ProtoMessage() {}
-func (*Defaults) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} }
-
-const Default_Defaults_F_Bool bool = true
-const Default_Defaults_F_Int32 int32 = 32
-const Default_Defaults_F_Int64 int64 = 64
-const Default_Defaults_F_Fixed32 uint32 = 320
-const Default_Defaults_F_Fixed64 uint64 = 640
-const Default_Defaults_F_Uint32 uint32 = 3200
-const Default_Defaults_F_Uint64 uint64 = 6400
-const Default_Defaults_F_Float float32 = 314159
-const Default_Defaults_F_Double float64 = 271828
-const Default_Defaults_F_String string = "hello, \"world!\"\n"
-
-var Default_Defaults_F_Bytes []byte = []byte("Bignose")
-
-const Default_Defaults_F_Sint32 int32 = -32
-const Default_Defaults_F_Sint64 int64 = -64
-const Default_Defaults_F_Enum Defaults_Color = Defaults_GREEN
-
-var Default_Defaults_F_Pinf float32 = float32(math.Inf(1))
-var Default_Defaults_F_Ninf float32 = float32(math.Inf(-1))
-var Default_Defaults_F_Nan float32 = float32(math.NaN())
-
-func (m *Defaults) GetF_Bool() bool {
- if m != nil && m.F_Bool != nil {
- return *m.F_Bool
- }
- return Default_Defaults_F_Bool
-}
-
-func (m *Defaults) GetF_Int32() int32 {
- if m != nil && m.F_Int32 != nil {
- return *m.F_Int32
- }
- return Default_Defaults_F_Int32
-}
-
-func (m *Defaults) GetF_Int64() int64 {
- if m != nil && m.F_Int64 != nil {
- return *m.F_Int64
- }
- return Default_Defaults_F_Int64
-}
-
-func (m *Defaults) GetF_Fixed32() uint32 {
- if m != nil && m.F_Fixed32 != nil {
- return *m.F_Fixed32
- }
- return Default_Defaults_F_Fixed32
-}
-
-func (m *Defaults) GetF_Fixed64() uint64 {
- if m != nil && m.F_Fixed64 != nil {
- return *m.F_Fixed64
- }
- return Default_Defaults_F_Fixed64
-}
-
-func (m *Defaults) GetF_Uint32() uint32 {
- if m != nil && m.F_Uint32 != nil {
- return *m.F_Uint32
- }
- return Default_Defaults_F_Uint32
-}
-
-func (m *Defaults) GetF_Uint64() uint64 {
- if m != nil && m.F_Uint64 != nil {
- return *m.F_Uint64
- }
- return Default_Defaults_F_Uint64
-}
-
-func (m *Defaults) GetF_Float() float32 {
- if m != nil && m.F_Float != nil {
- return *m.F_Float
- }
- return Default_Defaults_F_Float
-}
-
-func (m *Defaults) GetF_Double() float64 {
- if m != nil && m.F_Double != nil {
- return *m.F_Double
- }
- return Default_Defaults_F_Double
-}
-
-func (m *Defaults) GetF_String() string {
- if m != nil && m.F_String != nil {
- return *m.F_String
- }
- return Default_Defaults_F_String
-}
-
-func (m *Defaults) GetF_Bytes() []byte {
- if m != nil && m.F_Bytes != nil {
- return m.F_Bytes
- }
- return append([]byte(nil), Default_Defaults_F_Bytes...)
-}
-
-func (m *Defaults) GetF_Sint32() int32 {
- if m != nil && m.F_Sint32 != nil {
- return *m.F_Sint32
- }
- return Default_Defaults_F_Sint32
-}
-
-func (m *Defaults) GetF_Sint64() int64 {
- if m != nil && m.F_Sint64 != nil {
- return *m.F_Sint64
- }
- return Default_Defaults_F_Sint64
-}
-
-func (m *Defaults) GetF_Enum() Defaults_Color {
- if m != nil && m.F_Enum != nil {
- return *m.F_Enum
- }
- return Default_Defaults_F_Enum
-}
-
-func (m *Defaults) GetF_Pinf() float32 {
- if m != nil && m.F_Pinf != nil {
- return *m.F_Pinf
- }
- return Default_Defaults_F_Pinf
-}
-
-func (m *Defaults) GetF_Ninf() float32 {
- if m != nil && m.F_Ninf != nil {
- return *m.F_Ninf
- }
- return Default_Defaults_F_Ninf
-}
-
-func (m *Defaults) GetF_Nan() float32 {
- if m != nil && m.F_Nan != nil {
- return *m.F_Nan
- }
- return Default_Defaults_F_Nan
-}
-
-func (m *Defaults) GetSub() *SubDefaults {
- if m != nil {
- return m.Sub
- }
- return nil
-}
-
-func (m *Defaults) GetStrZero() string {
- if m != nil && m.StrZero != nil {
- return *m.StrZero
- }
- return ""
-}
-
-type SubDefaults struct {
- N *int64 `protobuf:"varint,1,opt,name=n,def=7" json:"n,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *SubDefaults) Reset() { *m = SubDefaults{} }
-func (m *SubDefaults) String() string { return proto.CompactTextString(m) }
-func (*SubDefaults) ProtoMessage() {}
-func (*SubDefaults) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} }
-
-const Default_SubDefaults_N int64 = 7
-
-func (m *SubDefaults) GetN() int64 {
- if m != nil && m.N != nil {
- return *m.N
- }
- return Default_SubDefaults_N
-}
-
-type RepeatedEnum struct {
- Color []RepeatedEnum_Color `protobuf:"varint,1,rep,name=color,enum=testdata.RepeatedEnum_Color" json:"color,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *RepeatedEnum) Reset() { *m = RepeatedEnum{} }
-func (m *RepeatedEnum) String() string { return proto.CompactTextString(m) }
-func (*RepeatedEnum) ProtoMessage() {}
-func (*RepeatedEnum) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} }
-
-func (m *RepeatedEnum) GetColor() []RepeatedEnum_Color {
- if m != nil {
- return m.Color
- }
- return nil
-}
-
-type MoreRepeated struct {
- Bools []bool `protobuf:"varint,1,rep,name=bools" json:"bools,omitempty"`
- BoolsPacked []bool `protobuf:"varint,2,rep,packed,name=bools_packed,json=boolsPacked" json:"bools_packed,omitempty"`
- Ints []int32 `protobuf:"varint,3,rep,name=ints" json:"ints,omitempty"`
- IntsPacked []int32 `protobuf:"varint,4,rep,packed,name=ints_packed,json=intsPacked" json:"ints_packed,omitempty"`
- Int64SPacked []int64 `protobuf:"varint,7,rep,packed,name=int64s_packed,json=int64sPacked" json:"int64s_packed,omitempty"`
- Strings []string `protobuf:"bytes,5,rep,name=strings" json:"strings,omitempty"`
- Fixeds []uint32 `protobuf:"fixed32,6,rep,name=fixeds" json:"fixeds,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *MoreRepeated) Reset() { *m = MoreRepeated{} }
-func (m *MoreRepeated) String() string { return proto.CompactTextString(m) }
-func (*MoreRepeated) ProtoMessage() {}
-func (*MoreRepeated) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} }
-
-func (m *MoreRepeated) GetBools() []bool {
- if m != nil {
- return m.Bools
- }
- return nil
-}
-
-func (m *MoreRepeated) GetBoolsPacked() []bool {
- if m != nil {
- return m.BoolsPacked
- }
- return nil
-}
-
-func (m *MoreRepeated) GetInts() []int32 {
- if m != nil {
- return m.Ints
- }
- return nil
-}
-
-func (m *MoreRepeated) GetIntsPacked() []int32 {
- if m != nil {
- return m.IntsPacked
- }
- return nil
-}
-
-func (m *MoreRepeated) GetInt64SPacked() []int64 {
- if m != nil {
- return m.Int64SPacked
- }
- return nil
-}
-
-func (m *MoreRepeated) GetStrings() []string {
- if m != nil {
- return m.Strings
- }
- return nil
-}
-
-func (m *MoreRepeated) GetFixeds() []uint32 {
- if m != nil {
- return m.Fixeds
- }
- return nil
-}
-
-type GroupOld struct {
- G *GroupOld_G `protobuf:"group,101,opt,name=G,json=g" json:"g,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *GroupOld) Reset() { *m = GroupOld{} }
-func (m *GroupOld) String() string { return proto.CompactTextString(m) }
-func (*GroupOld) ProtoMessage() {}
-func (*GroupOld) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} }
-
-func (m *GroupOld) GetG() *GroupOld_G {
- if m != nil {
- return m.G
- }
- return nil
-}
-
-type GroupOld_G struct {
- X *int32 `protobuf:"varint,2,opt,name=x" json:"x,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *GroupOld_G) Reset() { *m = GroupOld_G{} }
-func (m *GroupOld_G) String() string { return proto.CompactTextString(m) }
-func (*GroupOld_G) ProtoMessage() {}
-func (*GroupOld_G) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25, 0} }
-
-func (m *GroupOld_G) GetX() int32 {
- if m != nil && m.X != nil {
- return *m.X
- }
- return 0
-}
-
-type GroupNew struct {
- G *GroupNew_G `protobuf:"group,101,opt,name=G,json=g" json:"g,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *GroupNew) Reset() { *m = GroupNew{} }
-func (m *GroupNew) String() string { return proto.CompactTextString(m) }
-func (*GroupNew) ProtoMessage() {}
-func (*GroupNew) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} }
-
-func (m *GroupNew) GetG() *GroupNew_G {
- if m != nil {
- return m.G
- }
- return nil
-}
-
-type GroupNew_G struct {
- X *int32 `protobuf:"varint,2,opt,name=x" json:"x,omitempty"`
- Y *int32 `protobuf:"varint,3,opt,name=y" json:"y,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *GroupNew_G) Reset() { *m = GroupNew_G{} }
-func (m *GroupNew_G) String() string { return proto.CompactTextString(m) }
-func (*GroupNew_G) ProtoMessage() {}
-func (*GroupNew_G) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26, 0} }
-
-func (m *GroupNew_G) GetX() int32 {
- if m != nil && m.X != nil {
- return *m.X
- }
- return 0
-}
-
-func (m *GroupNew_G) GetY() int32 {
- if m != nil && m.Y != nil {
- return *m.Y
- }
- return 0
-}
-
-type FloatingPoint struct {
- F *float64 `protobuf:"fixed64,1,req,name=f" json:"f,omitempty"`
- Exact *bool `protobuf:"varint,2,opt,name=exact" json:"exact,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *FloatingPoint) Reset() { *m = FloatingPoint{} }
-func (m *FloatingPoint) String() string { return proto.CompactTextString(m) }
-func (*FloatingPoint) ProtoMessage() {}
-func (*FloatingPoint) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} }
-
-func (m *FloatingPoint) GetF() float64 {
- if m != nil && m.F != nil {
- return *m.F
- }
- return 0
-}
-
-func (m *FloatingPoint) GetExact() bool {
- if m != nil && m.Exact != nil {
- return *m.Exact
- }
- return false
-}
-
-type MessageWithMap struct {
- NameMapping map[int32]string `protobuf:"bytes,1,rep,name=name_mapping,json=nameMapping" json:"name_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
- MsgMapping map[int64]*FloatingPoint `protobuf:"bytes,2,rep,name=msg_mapping,json=msgMapping" json:"msg_mapping,omitempty" protobuf_key:"zigzag64,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
- ByteMapping map[bool][]byte `protobuf:"bytes,3,rep,name=byte_mapping,json=byteMapping" json:"byte_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
- StrToStr map[string]string `protobuf:"bytes,4,rep,name=str_to_str,json=strToStr" json:"str_to_str,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *MessageWithMap) Reset() { *m = MessageWithMap{} }
-func (m *MessageWithMap) String() string { return proto.CompactTextString(m) }
-func (*MessageWithMap) ProtoMessage() {}
-func (*MessageWithMap) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} }
-
-func (m *MessageWithMap) GetNameMapping() map[int32]string {
- if m != nil {
- return m.NameMapping
- }
- return nil
-}
-
-func (m *MessageWithMap) GetMsgMapping() map[int64]*FloatingPoint {
- if m != nil {
- return m.MsgMapping
- }
- return nil
-}
-
-func (m *MessageWithMap) GetByteMapping() map[bool][]byte {
- if m != nil {
- return m.ByteMapping
- }
- return nil
-}
-
-func (m *MessageWithMap) GetStrToStr() map[string]string {
- if m != nil {
- return m.StrToStr
- }
- return nil
-}
-
-type Oneof struct {
- // Types that are valid to be assigned to Union:
- // *Oneof_F_Bool
- // *Oneof_F_Int32
- // *Oneof_F_Int64
- // *Oneof_F_Fixed32
- // *Oneof_F_Fixed64
- // *Oneof_F_Uint32
- // *Oneof_F_Uint64
- // *Oneof_F_Float
- // *Oneof_F_Double
- // *Oneof_F_String
- // *Oneof_F_Bytes
- // *Oneof_F_Sint32
- // *Oneof_F_Sint64
- // *Oneof_F_Enum
- // *Oneof_F_Message
- // *Oneof_FGroup
- // *Oneof_F_Largest_Tag
- Union isOneof_Union `protobuf_oneof:"union"`
- // Types that are valid to be assigned to Tormato:
- // *Oneof_Value
- Tormato isOneof_Tormato `protobuf_oneof:"tormato"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *Oneof) Reset() { *m = Oneof{} }
-func (m *Oneof) String() string { return proto.CompactTextString(m) }
-func (*Oneof) ProtoMessage() {}
-func (*Oneof) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} }
-
-type isOneof_Union interface {
- isOneof_Union()
-}
-type isOneof_Tormato interface {
- isOneof_Tormato()
-}
-
-type Oneof_F_Bool struct {
- F_Bool bool `protobuf:"varint,1,opt,name=F_Bool,json=fBool,oneof"`
-}
-type Oneof_F_Int32 struct {
- F_Int32 int32 `protobuf:"varint,2,opt,name=F_Int32,json=fInt32,oneof"`
-}
-type Oneof_F_Int64 struct {
- F_Int64 int64 `protobuf:"varint,3,opt,name=F_Int64,json=fInt64,oneof"`
-}
-type Oneof_F_Fixed32 struct {
- F_Fixed32 uint32 `protobuf:"fixed32,4,opt,name=F_Fixed32,json=fFixed32,oneof"`
-}
-type Oneof_F_Fixed64 struct {
- F_Fixed64 uint64 `protobuf:"fixed64,5,opt,name=F_Fixed64,json=fFixed64,oneof"`
-}
-type Oneof_F_Uint32 struct {
- F_Uint32 uint32 `protobuf:"varint,6,opt,name=F_Uint32,json=fUint32,oneof"`
-}
-type Oneof_F_Uint64 struct {
- F_Uint64 uint64 `protobuf:"varint,7,opt,name=F_Uint64,json=fUint64,oneof"`
-}
-type Oneof_F_Float struct {
- F_Float float32 `protobuf:"fixed32,8,opt,name=F_Float,json=fFloat,oneof"`
-}
-type Oneof_F_Double struct {
- F_Double float64 `protobuf:"fixed64,9,opt,name=F_Double,json=fDouble,oneof"`
-}
-type Oneof_F_String struct {
- F_String string `protobuf:"bytes,10,opt,name=F_String,json=fString,oneof"`
-}
-type Oneof_F_Bytes struct {
- F_Bytes []byte `protobuf:"bytes,11,opt,name=F_Bytes,json=fBytes,oneof"`
-}
-type Oneof_F_Sint32 struct {
- F_Sint32 int32 `protobuf:"zigzag32,12,opt,name=F_Sint32,json=fSint32,oneof"`
-}
-type Oneof_F_Sint64 struct {
- F_Sint64 int64 `protobuf:"zigzag64,13,opt,name=F_Sint64,json=fSint64,oneof"`
-}
-type Oneof_F_Enum struct {
- F_Enum MyMessage_Color `protobuf:"varint,14,opt,name=F_Enum,json=fEnum,enum=testdata.MyMessage_Color,oneof"`
-}
-type Oneof_F_Message struct {
- F_Message *GoTestField `protobuf:"bytes,15,opt,name=F_Message,json=fMessage,oneof"`
-}
-type Oneof_FGroup struct {
- FGroup *Oneof_F_Group `protobuf:"group,16,opt,name=F_Group,json=fGroup,oneof"`
-}
-type Oneof_F_Largest_Tag struct {
- F_Largest_Tag int32 `protobuf:"varint,536870911,opt,name=F_Largest_Tag,json=fLargestTag,oneof"`
-}
-type Oneof_Value struct {
- Value int32 `protobuf:"varint,100,opt,name=value,oneof"`
-}
-
-func (*Oneof_F_Bool) isOneof_Union() {}
-func (*Oneof_F_Int32) isOneof_Union() {}
-func (*Oneof_F_Int64) isOneof_Union() {}
-func (*Oneof_F_Fixed32) isOneof_Union() {}
-func (*Oneof_F_Fixed64) isOneof_Union() {}
-func (*Oneof_F_Uint32) isOneof_Union() {}
-func (*Oneof_F_Uint64) isOneof_Union() {}
-func (*Oneof_F_Float) isOneof_Union() {}
-func (*Oneof_F_Double) isOneof_Union() {}
-func (*Oneof_F_String) isOneof_Union() {}
-func (*Oneof_F_Bytes) isOneof_Union() {}
-func (*Oneof_F_Sint32) isOneof_Union() {}
-func (*Oneof_F_Sint64) isOneof_Union() {}
-func (*Oneof_F_Enum) isOneof_Union() {}
-func (*Oneof_F_Message) isOneof_Union() {}
-func (*Oneof_FGroup) isOneof_Union() {}
-func (*Oneof_F_Largest_Tag) isOneof_Union() {}
-func (*Oneof_Value) isOneof_Tormato() {}
-
-func (m *Oneof) GetUnion() isOneof_Union {
- if m != nil {
- return m.Union
- }
- return nil
-}
-func (m *Oneof) GetTormato() isOneof_Tormato {
- if m != nil {
- return m.Tormato
- }
- return nil
-}
-
-func (m *Oneof) GetF_Bool() bool {
- if x, ok := m.GetUnion().(*Oneof_F_Bool); ok {
- return x.F_Bool
- }
- return false
-}
-
-func (m *Oneof) GetF_Int32() int32 {
- if x, ok := m.GetUnion().(*Oneof_F_Int32); ok {
- return x.F_Int32
- }
- return 0
-}
-
-func (m *Oneof) GetF_Int64() int64 {
- if x, ok := m.GetUnion().(*Oneof_F_Int64); ok {
- return x.F_Int64
- }
- return 0
-}
-
-func (m *Oneof) GetF_Fixed32() uint32 {
- if x, ok := m.GetUnion().(*Oneof_F_Fixed32); ok {
- return x.F_Fixed32
- }
- return 0
-}
-
-func (m *Oneof) GetF_Fixed64() uint64 {
- if x, ok := m.GetUnion().(*Oneof_F_Fixed64); ok {
- return x.F_Fixed64
- }
- return 0
-}
-
-func (m *Oneof) GetF_Uint32() uint32 {
- if x, ok := m.GetUnion().(*Oneof_F_Uint32); ok {
- return x.F_Uint32
- }
- return 0
-}
-
-func (m *Oneof) GetF_Uint64() uint64 {
- if x, ok := m.GetUnion().(*Oneof_F_Uint64); ok {
- return x.F_Uint64
- }
- return 0
-}
-
-func (m *Oneof) GetF_Float() float32 {
- if x, ok := m.GetUnion().(*Oneof_F_Float); ok {
- return x.F_Float
- }
- return 0
-}
-
-func (m *Oneof) GetF_Double() float64 {
- if x, ok := m.GetUnion().(*Oneof_F_Double); ok {
- return x.F_Double
- }
- return 0
-}
-
-func (m *Oneof) GetF_String() string {
- if x, ok := m.GetUnion().(*Oneof_F_String); ok {
- return x.F_String
- }
- return ""
-}
-
-func (m *Oneof) GetF_Bytes() []byte {
- if x, ok := m.GetUnion().(*Oneof_F_Bytes); ok {
- return x.F_Bytes
- }
- return nil
-}
-
-func (m *Oneof) GetF_Sint32() int32 {
- if x, ok := m.GetUnion().(*Oneof_F_Sint32); ok {
- return x.F_Sint32
- }
- return 0
-}
-
-func (m *Oneof) GetF_Sint64() int64 {
- if x, ok := m.GetUnion().(*Oneof_F_Sint64); ok {
- return x.F_Sint64
- }
- return 0
-}
-
-func (m *Oneof) GetF_Enum() MyMessage_Color {
- if x, ok := m.GetUnion().(*Oneof_F_Enum); ok {
- return x.F_Enum
- }
- return MyMessage_RED
-}
-
-func (m *Oneof) GetF_Message() *GoTestField {
- if x, ok := m.GetUnion().(*Oneof_F_Message); ok {
- return x.F_Message
- }
- return nil
-}
-
-func (m *Oneof) GetFGroup() *Oneof_F_Group {
- if x, ok := m.GetUnion().(*Oneof_FGroup); ok {
- return x.FGroup
- }
- return nil
-}
-
-func (m *Oneof) GetF_Largest_Tag() int32 {
- if x, ok := m.GetUnion().(*Oneof_F_Largest_Tag); ok {
- return x.F_Largest_Tag
- }
- return 0
-}
-
-func (m *Oneof) GetValue() int32 {
- if x, ok := m.GetTormato().(*Oneof_Value); ok {
- return x.Value
- }
- return 0
-}
-
-// XXX_OneofFuncs is for the internal use of the proto package.
-func (*Oneof) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
- return _Oneof_OneofMarshaler, _Oneof_OneofUnmarshaler, _Oneof_OneofSizer, []interface{}{
- (*Oneof_F_Bool)(nil),
- (*Oneof_F_Int32)(nil),
- (*Oneof_F_Int64)(nil),
- (*Oneof_F_Fixed32)(nil),
- (*Oneof_F_Fixed64)(nil),
- (*Oneof_F_Uint32)(nil),
- (*Oneof_F_Uint64)(nil),
- (*Oneof_F_Float)(nil),
- (*Oneof_F_Double)(nil),
- (*Oneof_F_String)(nil),
- (*Oneof_F_Bytes)(nil),
- (*Oneof_F_Sint32)(nil),
- (*Oneof_F_Sint64)(nil),
- (*Oneof_F_Enum)(nil),
- (*Oneof_F_Message)(nil),
- (*Oneof_FGroup)(nil),
- (*Oneof_F_Largest_Tag)(nil),
- (*Oneof_Value)(nil),
- }
-}
-
-func _Oneof_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
- m := msg.(*Oneof)
- // union
- switch x := m.Union.(type) {
- case *Oneof_F_Bool:
- t := uint64(0)
- if x.F_Bool {
- t = 1
- }
- b.EncodeVarint(1<<3 | proto.WireVarint)
- b.EncodeVarint(t)
- case *Oneof_F_Int32:
- b.EncodeVarint(2<<3 | proto.WireVarint)
- b.EncodeVarint(uint64(x.F_Int32))
- case *Oneof_F_Int64:
- b.EncodeVarint(3<<3 | proto.WireVarint)
- b.EncodeVarint(uint64(x.F_Int64))
- case *Oneof_F_Fixed32:
- b.EncodeVarint(4<<3 | proto.WireFixed32)
- b.EncodeFixed32(uint64(x.F_Fixed32))
- case *Oneof_F_Fixed64:
- b.EncodeVarint(5<<3 | proto.WireFixed64)
- b.EncodeFixed64(uint64(x.F_Fixed64))
- case *Oneof_F_Uint32:
- b.EncodeVarint(6<<3 | proto.WireVarint)
- b.EncodeVarint(uint64(x.F_Uint32))
- case *Oneof_F_Uint64:
- b.EncodeVarint(7<<3 | proto.WireVarint)
- b.EncodeVarint(uint64(x.F_Uint64))
- case *Oneof_F_Float:
- b.EncodeVarint(8<<3 | proto.WireFixed32)
- b.EncodeFixed32(uint64(math.Float32bits(x.F_Float)))
- case *Oneof_F_Double:
- b.EncodeVarint(9<<3 | proto.WireFixed64)
- b.EncodeFixed64(math.Float64bits(x.F_Double))
- case *Oneof_F_String:
- b.EncodeVarint(10<<3 | proto.WireBytes)
- b.EncodeStringBytes(x.F_String)
- case *Oneof_F_Bytes:
- b.EncodeVarint(11<<3 | proto.WireBytes)
- b.EncodeRawBytes(x.F_Bytes)
- case *Oneof_F_Sint32:
- b.EncodeVarint(12<<3 | proto.WireVarint)
- b.EncodeZigzag32(uint64(x.F_Sint32))
- case *Oneof_F_Sint64:
- b.EncodeVarint(13<<3 | proto.WireVarint)
- b.EncodeZigzag64(uint64(x.F_Sint64))
- case *Oneof_F_Enum:
- b.EncodeVarint(14<<3 | proto.WireVarint)
- b.EncodeVarint(uint64(x.F_Enum))
- case *Oneof_F_Message:
- b.EncodeVarint(15<<3 | proto.WireBytes)
- if err := b.EncodeMessage(x.F_Message); err != nil {
- return err
- }
- case *Oneof_FGroup:
- b.EncodeVarint(16<<3 | proto.WireStartGroup)
- if err := b.Marshal(x.FGroup); err != nil {
- return err
- }
- b.EncodeVarint(16<<3 | proto.WireEndGroup)
- case *Oneof_F_Largest_Tag:
- b.EncodeVarint(536870911<<3 | proto.WireVarint)
- b.EncodeVarint(uint64(x.F_Largest_Tag))
- case nil:
- default:
- return fmt.Errorf("Oneof.Union has unexpected type %T", x)
- }
- // tormato
- switch x := m.Tormato.(type) {
- case *Oneof_Value:
- b.EncodeVarint(100<<3 | proto.WireVarint)
- b.EncodeVarint(uint64(x.Value))
- case nil:
- default:
- return fmt.Errorf("Oneof.Tormato has unexpected type %T", x)
- }
- return nil
-}
-
-func _Oneof_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
- m := msg.(*Oneof)
- switch tag {
- case 1: // union.F_Bool
- if wire != proto.WireVarint {
- return true, proto.ErrInternalBadWireType
- }
- x, err := b.DecodeVarint()
- m.Union = &Oneof_F_Bool{x != 0}
- return true, err
- case 2: // union.F_Int32
- if wire != proto.WireVarint {
- return true, proto.ErrInternalBadWireType
- }
- x, err := b.DecodeVarint()
- m.Union = &Oneof_F_Int32{int32(x)}
- return true, err
- case 3: // union.F_Int64
- if wire != proto.WireVarint {
- return true, proto.ErrInternalBadWireType
- }
- x, err := b.DecodeVarint()
- m.Union = &Oneof_F_Int64{int64(x)}
- return true, err
- case 4: // union.F_Fixed32
- if wire != proto.WireFixed32 {
- return true, proto.ErrInternalBadWireType
- }
- x, err := b.DecodeFixed32()
- m.Union = &Oneof_F_Fixed32{uint32(x)}
- return true, err
- case 5: // union.F_Fixed64
- if wire != proto.WireFixed64 {
- return true, proto.ErrInternalBadWireType
- }
- x, err := b.DecodeFixed64()
- m.Union = &Oneof_F_Fixed64{x}
- return true, err
- case 6: // union.F_Uint32
- if wire != proto.WireVarint {
- return true, proto.ErrInternalBadWireType
- }
- x, err := b.DecodeVarint()
- m.Union = &Oneof_F_Uint32{uint32(x)}
- return true, err
- case 7: // union.F_Uint64
- if wire != proto.WireVarint {
- return true, proto.ErrInternalBadWireType
- }
- x, err := b.DecodeVarint()
- m.Union = &Oneof_F_Uint64{x}
- return true, err
- case 8: // union.F_Float
- if wire != proto.WireFixed32 {
- return true, proto.ErrInternalBadWireType
- }
- x, err := b.DecodeFixed32()
- m.Union = &Oneof_F_Float{math.Float32frombits(uint32(x))}
- return true, err
- case 9: // union.F_Double
- if wire != proto.WireFixed64 {
- return true, proto.ErrInternalBadWireType
- }
- x, err := b.DecodeFixed64()
- m.Union = &Oneof_F_Double{math.Float64frombits(x)}
- return true, err
- case 10: // union.F_String
- if wire != proto.WireBytes {
- return true, proto.ErrInternalBadWireType
- }
- x, err := b.DecodeStringBytes()
- m.Union = &Oneof_F_String{x}
- return true, err
- case 11: // union.F_Bytes
- if wire != proto.WireBytes {
- return true, proto.ErrInternalBadWireType
- }
- x, err := b.DecodeRawBytes(true)
- m.Union = &Oneof_F_Bytes{x}
- return true, err
- case 12: // union.F_Sint32
- if wire != proto.WireVarint {
- return true, proto.ErrInternalBadWireType
- }
- x, err := b.DecodeZigzag32()
- m.Union = &Oneof_F_Sint32{int32(x)}
- return true, err
- case 13: // union.F_Sint64
- if wire != proto.WireVarint {
- return true, proto.ErrInternalBadWireType
- }
- x, err := b.DecodeZigzag64()
- m.Union = &Oneof_F_Sint64{int64(x)}
- return true, err
- case 14: // union.F_Enum
- if wire != proto.WireVarint {
- return true, proto.ErrInternalBadWireType
- }
- x, err := b.DecodeVarint()
- m.Union = &Oneof_F_Enum{MyMessage_Color(x)}
- return true, err
- case 15: // union.F_Message
- if wire != proto.WireBytes {
- return true, proto.ErrInternalBadWireType
- }
- msg := new(GoTestField)
- err := b.DecodeMessage(msg)
- m.Union = &Oneof_F_Message{msg}
- return true, err
- case 16: // union.f_group
- if wire != proto.WireStartGroup {
- return true, proto.ErrInternalBadWireType
- }
- msg := new(Oneof_F_Group)
- err := b.DecodeGroup(msg)
- m.Union = &Oneof_FGroup{msg}
- return true, err
- case 536870911: // union.F_Largest_Tag
- if wire != proto.WireVarint {
- return true, proto.ErrInternalBadWireType
- }
- x, err := b.DecodeVarint()
- m.Union = &Oneof_F_Largest_Tag{int32(x)}
- return true, err
- case 100: // tormato.value
- if wire != proto.WireVarint {
- return true, proto.ErrInternalBadWireType
- }
- x, err := b.DecodeVarint()
- m.Tormato = &Oneof_Value{int32(x)}
- return true, err
- default:
- return false, nil
- }
-}
-
-func _Oneof_OneofSizer(msg proto.Message) (n int) {
- m := msg.(*Oneof)
- // union
- switch x := m.Union.(type) {
- case *Oneof_F_Bool:
- n += proto.SizeVarint(1<<3 | proto.WireVarint)
- n += 1
- case *Oneof_F_Int32:
- n += proto.SizeVarint(2<<3 | proto.WireVarint)
- n += proto.SizeVarint(uint64(x.F_Int32))
- case *Oneof_F_Int64:
- n += proto.SizeVarint(3<<3 | proto.WireVarint)
- n += proto.SizeVarint(uint64(x.F_Int64))
- case *Oneof_F_Fixed32:
- n += proto.SizeVarint(4<<3 | proto.WireFixed32)
- n += 4
- case *Oneof_F_Fixed64:
- n += proto.SizeVarint(5<<3 | proto.WireFixed64)
- n += 8
- case *Oneof_F_Uint32:
- n += proto.SizeVarint(6<<3 | proto.WireVarint)
- n += proto.SizeVarint(uint64(x.F_Uint32))
- case *Oneof_F_Uint64:
- n += proto.SizeVarint(7<<3 | proto.WireVarint)
- n += proto.SizeVarint(uint64(x.F_Uint64))
- case *Oneof_F_Float:
- n += proto.SizeVarint(8<<3 | proto.WireFixed32)
- n += 4
- case *Oneof_F_Double:
- n += proto.SizeVarint(9<<3 | proto.WireFixed64)
- n += 8
- case *Oneof_F_String:
- n += proto.SizeVarint(10<<3 | proto.WireBytes)
- n += proto.SizeVarint(uint64(len(x.F_String)))
- n += len(x.F_String)
- case *Oneof_F_Bytes:
- n += proto.SizeVarint(11<<3 | proto.WireBytes)
- n += proto.SizeVarint(uint64(len(x.F_Bytes)))
- n += len(x.F_Bytes)
- case *Oneof_F_Sint32:
- n += proto.SizeVarint(12<<3 | proto.WireVarint)
- n += proto.SizeVarint(uint64((uint32(x.F_Sint32) << 1) ^ uint32((int32(x.F_Sint32) >> 31))))
- case *Oneof_F_Sint64:
- n += proto.SizeVarint(13<<3 | proto.WireVarint)
- n += proto.SizeVarint(uint64(uint64(x.F_Sint64<<1) ^ uint64((int64(x.F_Sint64) >> 63))))
- case *Oneof_F_Enum:
- n += proto.SizeVarint(14<<3 | proto.WireVarint)
- n += proto.SizeVarint(uint64(x.F_Enum))
- case *Oneof_F_Message:
- s := proto.Size(x.F_Message)
- n += proto.SizeVarint(15<<3 | proto.WireBytes)
- n += proto.SizeVarint(uint64(s))
- n += s
- case *Oneof_FGroup:
- n += proto.SizeVarint(16<<3 | proto.WireStartGroup)
- n += proto.Size(x.FGroup)
- n += proto.SizeVarint(16<<3 | proto.WireEndGroup)
- case *Oneof_F_Largest_Tag:
- n += proto.SizeVarint(536870911<<3 | proto.WireVarint)
- n += proto.SizeVarint(uint64(x.F_Largest_Tag))
- case nil:
- default:
- panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
- }
- // tormato
- switch x := m.Tormato.(type) {
- case *Oneof_Value:
- n += proto.SizeVarint(100<<3 | proto.WireVarint)
- n += proto.SizeVarint(uint64(x.Value))
- case nil:
- default:
- panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
- }
- return n
-}
-
-type Oneof_F_Group struct {
- X *int32 `protobuf:"varint,17,opt,name=x" json:"x,omitempty"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *Oneof_F_Group) Reset() { *m = Oneof_F_Group{} }
-func (m *Oneof_F_Group) String() string { return proto.CompactTextString(m) }
-func (*Oneof_F_Group) ProtoMessage() {}
-func (*Oneof_F_Group) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29, 0} }
-
-func (m *Oneof_F_Group) GetX() int32 {
- if m != nil && m.X != nil {
- return *m.X
- }
- return 0
-}
-
-type Communique struct {
- MakeMeCry *bool `protobuf:"varint,1,opt,name=make_me_cry,json=makeMeCry" json:"make_me_cry,omitempty"`
- // This is a oneof, called "union".
- //
- // Types that are valid to be assigned to Union:
- // *Communique_Number
- // *Communique_Name
- // *Communique_Data
- // *Communique_TempC
- // *Communique_Col
- // *Communique_Msg
- Union isCommunique_Union `protobuf_oneof:"union"`
- XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *Communique) Reset() { *m = Communique{} }
-func (m *Communique) String() string { return proto.CompactTextString(m) }
-func (*Communique) ProtoMessage() {}
-func (*Communique) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} }
-
-type isCommunique_Union interface {
- isCommunique_Union()
-}
-
-type Communique_Number struct {
- Number int32 `protobuf:"varint,5,opt,name=number,oneof"`
-}
-type Communique_Name struct {
- Name string `protobuf:"bytes,6,opt,name=name,oneof"`
-}
-type Communique_Data struct {
- Data []byte `protobuf:"bytes,7,opt,name=data,oneof"`
-}
-type Communique_TempC struct {
- TempC float64 `protobuf:"fixed64,8,opt,name=temp_c,json=tempC,oneof"`
-}
-type Communique_Col struct {
- Col MyMessage_Color `protobuf:"varint,9,opt,name=col,enum=testdata.MyMessage_Color,oneof"`
-}
-type Communique_Msg struct {
- Msg *Strings `protobuf:"bytes,10,opt,name=msg,oneof"`
-}
-
-func (*Communique_Number) isCommunique_Union() {}
-func (*Communique_Name) isCommunique_Union() {}
-func (*Communique_Data) isCommunique_Union() {}
-func (*Communique_TempC) isCommunique_Union() {}
-func (*Communique_Col) isCommunique_Union() {}
-func (*Communique_Msg) isCommunique_Union() {}
-
-func (m *Communique) GetUnion() isCommunique_Union {
- if m != nil {
- return m.Union
- }
- return nil
-}
-
-func (m *Communique) GetMakeMeCry() bool {
- if m != nil && m.MakeMeCry != nil {
- return *m.MakeMeCry
- }
- return false
-}
-
-func (m *Communique) GetNumber() int32 {
- if x, ok := m.GetUnion().(*Communique_Number); ok {
- return x.Number
- }
- return 0
-}
-
-func (m *Communique) GetName() string {
- if x, ok := m.GetUnion().(*Communique_Name); ok {
- return x.Name
- }
- return ""
-}
-
-func (m *Communique) GetData() []byte {
- if x, ok := m.GetUnion().(*Communique_Data); ok {
- return x.Data
- }
- return nil
-}
-
-func (m *Communique) GetTempC() float64 {
- if x, ok := m.GetUnion().(*Communique_TempC); ok {
- return x.TempC
- }
- return 0
-}
-
-func (m *Communique) GetCol() MyMessage_Color {
- if x, ok := m.GetUnion().(*Communique_Col); ok {
- return x.Col
- }
- return MyMessage_RED
-}
-
-func (m *Communique) GetMsg() *Strings {
- if x, ok := m.GetUnion().(*Communique_Msg); ok {
- return x.Msg
- }
- return nil
-}
-
-// XXX_OneofFuncs is for the internal use of the proto package.
-func (*Communique) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
- return _Communique_OneofMarshaler, _Communique_OneofUnmarshaler, _Communique_OneofSizer, []interface{}{
- (*Communique_Number)(nil),
- (*Communique_Name)(nil),
- (*Communique_Data)(nil),
- (*Communique_TempC)(nil),
- (*Communique_Col)(nil),
- (*Communique_Msg)(nil),
- }
-}
-
-func _Communique_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
- m := msg.(*Communique)
- // union
- switch x := m.Union.(type) {
- case *Communique_Number:
- b.EncodeVarint(5<<3 | proto.WireVarint)
- b.EncodeVarint(uint64(x.Number))
- case *Communique_Name:
- b.EncodeVarint(6<<3 | proto.WireBytes)
- b.EncodeStringBytes(x.Name)
- case *Communique_Data:
- b.EncodeVarint(7<<3 | proto.WireBytes)
- b.EncodeRawBytes(x.Data)
- case *Communique_TempC:
- b.EncodeVarint(8<<3 | proto.WireFixed64)
- b.EncodeFixed64(math.Float64bits(x.TempC))
- case *Communique_Col:
- b.EncodeVarint(9<<3 | proto.WireVarint)
- b.EncodeVarint(uint64(x.Col))
- case *Communique_Msg:
- b.EncodeVarint(10<<3 | proto.WireBytes)
- if err := b.EncodeMessage(x.Msg); err != nil {
- return err
- }
- case nil:
- default:
- return fmt.Errorf("Communique.Union has unexpected type %T", x)
- }
- return nil
-}
-
-func _Communique_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
- m := msg.(*Communique)
- switch tag {
- case 5: // union.number
- if wire != proto.WireVarint {
- return true, proto.ErrInternalBadWireType
- }
- x, err := b.DecodeVarint()
- m.Union = &Communique_Number{int32(x)}
- return true, err
- case 6: // union.name
- if wire != proto.WireBytes {
- return true, proto.ErrInternalBadWireType
- }
- x, err := b.DecodeStringBytes()
- m.Union = &Communique_Name{x}
- return true, err
- case 7: // union.data
- if wire != proto.WireBytes {
- return true, proto.ErrInternalBadWireType
- }
- x, err := b.DecodeRawBytes(true)
- m.Union = &Communique_Data{x}
- return true, err
- case 8: // union.temp_c
- if wire != proto.WireFixed64 {
- return true, proto.ErrInternalBadWireType
- }
- x, err := b.DecodeFixed64()
- m.Union = &Communique_TempC{math.Float64frombits(x)}
- return true, err
- case 9: // union.col
- if wire != proto.WireVarint {
- return true, proto.ErrInternalBadWireType
- }
- x, err := b.DecodeVarint()
- m.Union = &Communique_Col{MyMessage_Color(x)}
- return true, err
- case 10: // union.msg
- if wire != proto.WireBytes {
- return true, proto.ErrInternalBadWireType
- }
- msg := new(Strings)
- err := b.DecodeMessage(msg)
- m.Union = &Communique_Msg{msg}
- return true, err
- default:
- return false, nil
- }
-}
-
-func _Communique_OneofSizer(msg proto.Message) (n int) {
- m := msg.(*Communique)
- // union
- switch x := m.Union.(type) {
- case *Communique_Number:
- n += proto.SizeVarint(5<<3 | proto.WireVarint)
- n += proto.SizeVarint(uint64(x.Number))
- case *Communique_Name:
- n += proto.SizeVarint(6<<3 | proto.WireBytes)
- n += proto.SizeVarint(uint64(len(x.Name)))
- n += len(x.Name)
- case *Communique_Data:
- n += proto.SizeVarint(7<<3 | proto.WireBytes)
- n += proto.SizeVarint(uint64(len(x.Data)))
- n += len(x.Data)
- case *Communique_TempC:
- n += proto.SizeVarint(8<<3 | proto.WireFixed64)
- n += 8
- case *Communique_Col:
- n += proto.SizeVarint(9<<3 | proto.WireVarint)
- n += proto.SizeVarint(uint64(x.Col))
- case *Communique_Msg:
- s := proto.Size(x.Msg)
- n += proto.SizeVarint(10<<3 | proto.WireBytes)
- n += proto.SizeVarint(uint64(s))
- n += s
- case nil:
- default:
- panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
- }
- return n
-}
-
-var E_Greeting = &proto.ExtensionDesc{
- ExtendedType: (*MyMessage)(nil),
- ExtensionType: ([]string)(nil),
- Field: 106,
- Name: "testdata.greeting",
- Tag: "bytes,106,rep,name=greeting",
-}
-
-var E_Complex = &proto.ExtensionDesc{
- ExtendedType: (*OtherMessage)(nil),
- ExtensionType: (*ComplexExtension)(nil),
- Field: 200,
- Name: "testdata.complex",
- Tag: "bytes,200,opt,name=complex",
-}
-
-var E_RComplex = &proto.ExtensionDesc{
- ExtendedType: (*OtherMessage)(nil),
- ExtensionType: ([]*ComplexExtension)(nil),
- Field: 201,
- Name: "testdata.r_complex",
- Tag: "bytes,201,rep,name=r_complex,json=rComplex",
-}
-
-var E_NoDefaultDouble = &proto.ExtensionDesc{
- ExtendedType: (*DefaultsMessage)(nil),
- ExtensionType: (*float64)(nil),
- Field: 101,
- Name: "testdata.no_default_double",
- Tag: "fixed64,101,opt,name=no_default_double,json=noDefaultDouble",
-}
-
-var E_NoDefaultFloat = &proto.ExtensionDesc{
- ExtendedType: (*DefaultsMessage)(nil),
- ExtensionType: (*float32)(nil),
- Field: 102,
- Name: "testdata.no_default_float",
- Tag: "fixed32,102,opt,name=no_default_float,json=noDefaultFloat",
-}
-
-var E_NoDefaultInt32 = &proto.ExtensionDesc{
- ExtendedType: (*DefaultsMessage)(nil),
- ExtensionType: (*int32)(nil),
- Field: 103,
- Name: "testdata.no_default_int32",
- Tag: "varint,103,opt,name=no_default_int32,json=noDefaultInt32",
-}
-
-var E_NoDefaultInt64 = &proto.ExtensionDesc{
- ExtendedType: (*DefaultsMessage)(nil),
- ExtensionType: (*int64)(nil),
- Field: 104,
- Name: "testdata.no_default_int64",
- Tag: "varint,104,opt,name=no_default_int64,json=noDefaultInt64",
-}
-
-var E_NoDefaultUint32 = &proto.ExtensionDesc{
- ExtendedType: (*DefaultsMessage)(nil),
- ExtensionType: (*uint32)(nil),
- Field: 105,
- Name: "testdata.no_default_uint32",
- Tag: "varint,105,opt,name=no_default_uint32,json=noDefaultUint32",
-}
-
-var E_NoDefaultUint64 = &proto.ExtensionDesc{
- ExtendedType: (*DefaultsMessage)(nil),
- ExtensionType: (*uint64)(nil),
- Field: 106,
- Name: "testdata.no_default_uint64",
- Tag: "varint,106,opt,name=no_default_uint64,json=noDefaultUint64",
-}
-
-var E_NoDefaultSint32 = &proto.ExtensionDesc{
- ExtendedType: (*DefaultsMessage)(nil),
- ExtensionType: (*int32)(nil),
- Field: 107,
- Name: "testdata.no_default_sint32",
- Tag: "zigzag32,107,opt,name=no_default_sint32,json=noDefaultSint32",
-}
-
-var E_NoDefaultSint64 = &proto.ExtensionDesc{
- ExtendedType: (*DefaultsMessage)(nil),
- ExtensionType: (*int64)(nil),
- Field: 108,
- Name: "testdata.no_default_sint64",
- Tag: "zigzag64,108,opt,name=no_default_sint64,json=noDefaultSint64",
-}
-
-var E_NoDefaultFixed32 = &proto.ExtensionDesc{
- ExtendedType: (*DefaultsMessage)(nil),
- ExtensionType: (*uint32)(nil),
- Field: 109,
- Name: "testdata.no_default_fixed32",
- Tag: "fixed32,109,opt,name=no_default_fixed32,json=noDefaultFixed32",
-}
-
-var E_NoDefaultFixed64 = &proto.ExtensionDesc{
- ExtendedType: (*DefaultsMessage)(nil),
- ExtensionType: (*uint64)(nil),
- Field: 110,
- Name: "testdata.no_default_fixed64",
- Tag: "fixed64,110,opt,name=no_default_fixed64,json=noDefaultFixed64",
-}
-
-var E_NoDefaultSfixed32 = &proto.ExtensionDesc{
- ExtendedType: (*DefaultsMessage)(nil),
- ExtensionType: (*int32)(nil),
- Field: 111,
- Name: "testdata.no_default_sfixed32",
- Tag: "fixed32,111,opt,name=no_default_sfixed32,json=noDefaultSfixed32",
-}
-
-var E_NoDefaultSfixed64 = &proto.ExtensionDesc{
- ExtendedType: (*DefaultsMessage)(nil),
- ExtensionType: (*int64)(nil),
- Field: 112,
- Name: "testdata.no_default_sfixed64",
- Tag: "fixed64,112,opt,name=no_default_sfixed64,json=noDefaultSfixed64",
-}
-
-var E_NoDefaultBool = &proto.ExtensionDesc{
- ExtendedType: (*DefaultsMessage)(nil),
- ExtensionType: (*bool)(nil),
- Field: 113,
- Name: "testdata.no_default_bool",
- Tag: "varint,113,opt,name=no_default_bool,json=noDefaultBool",
-}
-
-var E_NoDefaultString = &proto.ExtensionDesc{
- ExtendedType: (*DefaultsMessage)(nil),
- ExtensionType: (*string)(nil),
- Field: 114,
- Name: "testdata.no_default_string",
- Tag: "bytes,114,opt,name=no_default_string,json=noDefaultString",
-}
-
-var E_NoDefaultBytes = &proto.ExtensionDesc{
- ExtendedType: (*DefaultsMessage)(nil),
- ExtensionType: ([]byte)(nil),
- Field: 115,
- Name: "testdata.no_default_bytes",
- Tag: "bytes,115,opt,name=no_default_bytes,json=noDefaultBytes",
-}
-
-var E_NoDefaultEnum = &proto.ExtensionDesc{
- ExtendedType: (*DefaultsMessage)(nil),
- ExtensionType: (*DefaultsMessage_DefaultsEnum)(nil),
- Field: 116,
- Name: "testdata.no_default_enum",
- Tag: "varint,116,opt,name=no_default_enum,json=noDefaultEnum,enum=testdata.DefaultsMessage_DefaultsEnum",
-}
-
-var E_DefaultDouble = &proto.ExtensionDesc{
- ExtendedType: (*DefaultsMessage)(nil),
- ExtensionType: (*float64)(nil),
- Field: 201,
- Name: "testdata.default_double",
- Tag: "fixed64,201,opt,name=default_double,json=defaultDouble,def=3.1415",
-}
-
-var E_DefaultFloat = &proto.ExtensionDesc{
- ExtendedType: (*DefaultsMessage)(nil),
- ExtensionType: (*float32)(nil),
- Field: 202,
- Name: "testdata.default_float",
- Tag: "fixed32,202,opt,name=default_float,json=defaultFloat,def=3.14",
-}
-
-var E_DefaultInt32 = &proto.ExtensionDesc{
- ExtendedType: (*DefaultsMessage)(nil),
- ExtensionType: (*int32)(nil),
- Field: 203,
- Name: "testdata.default_int32",
- Tag: "varint,203,opt,name=default_int32,json=defaultInt32,def=42",
-}
-
-var E_DefaultInt64 = &proto.ExtensionDesc{
- ExtendedType: (*DefaultsMessage)(nil),
- ExtensionType: (*int64)(nil),
- Field: 204,
- Name: "testdata.default_int64",
- Tag: "varint,204,opt,name=default_int64,json=defaultInt64,def=43",
-}
-
-var E_DefaultUint32 = &proto.ExtensionDesc{
- ExtendedType: (*DefaultsMessage)(nil),
- ExtensionType: (*uint32)(nil),
- Field: 205,
- Name: "testdata.default_uint32",
- Tag: "varint,205,opt,name=default_uint32,json=defaultUint32,def=44",
-}
-
-var E_DefaultUint64 = &proto.ExtensionDesc{
- ExtendedType: (*DefaultsMessage)(nil),
- ExtensionType: (*uint64)(nil),
- Field: 206,
- Name: "testdata.default_uint64",
- Tag: "varint,206,opt,name=default_uint64,json=defaultUint64,def=45",
-}
-
-var E_DefaultSint32 = &proto.ExtensionDesc{
- ExtendedType: (*DefaultsMessage)(nil),
- ExtensionType: (*int32)(nil),
- Field: 207,
- Name: "testdata.default_sint32",
- Tag: "zigzag32,207,opt,name=default_sint32,json=defaultSint32,def=46",
-}
-
-var E_DefaultSint64 = &proto.ExtensionDesc{
- ExtendedType: (*DefaultsMessage)(nil),
- ExtensionType: (*int64)(nil),
- Field: 208,
- Name: "testdata.default_sint64",
- Tag: "zigzag64,208,opt,name=default_sint64,json=defaultSint64,def=47",
-}
-
-var E_DefaultFixed32 = &proto.ExtensionDesc{
- ExtendedType: (*DefaultsMessage)(nil),
- ExtensionType: (*uint32)(nil),
- Field: 209,
- Name: "testdata.default_fixed32",
- Tag: "fixed32,209,opt,name=default_fixed32,json=defaultFixed32,def=48",
-}
-
-var E_DefaultFixed64 = &proto.ExtensionDesc{
- ExtendedType: (*DefaultsMessage)(nil),
- ExtensionType: (*uint64)(nil),
- Field: 210,
- Name: "testdata.default_fixed64",
- Tag: "fixed64,210,opt,name=default_fixed64,json=defaultFixed64,def=49",
-}
-
-var E_DefaultSfixed32 = &proto.ExtensionDesc{
- ExtendedType: (*DefaultsMessage)(nil),
- ExtensionType: (*int32)(nil),
- Field: 211,
- Name: "testdata.default_sfixed32",
- Tag: "fixed32,211,opt,name=default_sfixed32,json=defaultSfixed32,def=50",
-}
-
-var E_DefaultSfixed64 = &proto.ExtensionDesc{
- ExtendedType: (*DefaultsMessage)(nil),
- ExtensionType: (*int64)(nil),
- Field: 212,
- Name: "testdata.default_sfixed64",
- Tag: "fixed64,212,opt,name=default_sfixed64,json=defaultSfixed64,def=51",
-}
-
-var E_DefaultBool = &proto.ExtensionDesc{
- ExtendedType: (*DefaultsMessage)(nil),
- ExtensionType: (*bool)(nil),
- Field: 213,
- Name: "testdata.default_bool",
- Tag: "varint,213,opt,name=default_bool,json=defaultBool,def=1",
-}
-
-var E_DefaultString = &proto.ExtensionDesc{
- ExtendedType: (*DefaultsMessage)(nil),
- ExtensionType: (*string)(nil),
- Field: 214,
- Name: "testdata.default_string",
- Tag: "bytes,214,opt,name=default_string,json=defaultString,def=Hello, string",
-}
-
-var E_DefaultBytes = &proto.ExtensionDesc{
- ExtendedType: (*DefaultsMessage)(nil),
- ExtensionType: ([]byte)(nil),
- Field: 215,
- Name: "testdata.default_bytes",
- Tag: "bytes,215,opt,name=default_bytes,json=defaultBytes,def=Hello, bytes",
-}
-
-var E_DefaultEnum = &proto.ExtensionDesc{
- ExtendedType: (*DefaultsMessage)(nil),
- ExtensionType: (*DefaultsMessage_DefaultsEnum)(nil),
- Field: 216,
- Name: "testdata.default_enum",
- Tag: "varint,216,opt,name=default_enum,json=defaultEnum,enum=testdata.DefaultsMessage_DefaultsEnum,def=1",
-}
-
-var E_X201 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 201,
- Name: "testdata.x201",
- Tag: "bytes,201,opt,name=x201",
-}
-
-var E_X202 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 202,
- Name: "testdata.x202",
- Tag: "bytes,202,opt,name=x202",
-}
-
-var E_X203 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 203,
- Name: "testdata.x203",
- Tag: "bytes,203,opt,name=x203",
-}
-
-var E_X204 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 204,
- Name: "testdata.x204",
- Tag: "bytes,204,opt,name=x204",
-}
-
-var E_X205 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 205,
- Name: "testdata.x205",
- Tag: "bytes,205,opt,name=x205",
-}
-
-var E_X206 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 206,
- Name: "testdata.x206",
- Tag: "bytes,206,opt,name=x206",
-}
-
-var E_X207 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 207,
- Name: "testdata.x207",
- Tag: "bytes,207,opt,name=x207",
-}
-
-var E_X208 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 208,
- Name: "testdata.x208",
- Tag: "bytes,208,opt,name=x208",
-}
-
-var E_X209 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 209,
- Name: "testdata.x209",
- Tag: "bytes,209,opt,name=x209",
-}
-
-var E_X210 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 210,
- Name: "testdata.x210",
- Tag: "bytes,210,opt,name=x210",
-}
-
-var E_X211 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 211,
- Name: "testdata.x211",
- Tag: "bytes,211,opt,name=x211",
-}
-
-var E_X212 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 212,
- Name: "testdata.x212",
- Tag: "bytes,212,opt,name=x212",
-}
-
-var E_X213 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 213,
- Name: "testdata.x213",
- Tag: "bytes,213,opt,name=x213",
-}
-
-var E_X214 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 214,
- Name: "testdata.x214",
- Tag: "bytes,214,opt,name=x214",
-}
-
-var E_X215 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 215,
- Name: "testdata.x215",
- Tag: "bytes,215,opt,name=x215",
-}
-
-var E_X216 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 216,
- Name: "testdata.x216",
- Tag: "bytes,216,opt,name=x216",
-}
-
-var E_X217 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 217,
- Name: "testdata.x217",
- Tag: "bytes,217,opt,name=x217",
-}
-
-var E_X218 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 218,
- Name: "testdata.x218",
- Tag: "bytes,218,opt,name=x218",
-}
-
-var E_X219 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 219,
- Name: "testdata.x219",
- Tag: "bytes,219,opt,name=x219",
-}
-
-var E_X220 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 220,
- Name: "testdata.x220",
- Tag: "bytes,220,opt,name=x220",
-}
-
-var E_X221 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 221,
- Name: "testdata.x221",
- Tag: "bytes,221,opt,name=x221",
-}
-
-var E_X222 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 222,
- Name: "testdata.x222",
- Tag: "bytes,222,opt,name=x222",
-}
-
-var E_X223 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 223,
- Name: "testdata.x223",
- Tag: "bytes,223,opt,name=x223",
-}
-
-var E_X224 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 224,
- Name: "testdata.x224",
- Tag: "bytes,224,opt,name=x224",
-}
-
-var E_X225 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 225,
- Name: "testdata.x225",
- Tag: "bytes,225,opt,name=x225",
-}
-
-var E_X226 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 226,
- Name: "testdata.x226",
- Tag: "bytes,226,opt,name=x226",
-}
-
-var E_X227 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 227,
- Name: "testdata.x227",
- Tag: "bytes,227,opt,name=x227",
-}
-
-var E_X228 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 228,
- Name: "testdata.x228",
- Tag: "bytes,228,opt,name=x228",
-}
-
-var E_X229 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 229,
- Name: "testdata.x229",
- Tag: "bytes,229,opt,name=x229",
-}
-
-var E_X230 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 230,
- Name: "testdata.x230",
- Tag: "bytes,230,opt,name=x230",
-}
-
-var E_X231 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 231,
- Name: "testdata.x231",
- Tag: "bytes,231,opt,name=x231",
-}
-
-var E_X232 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 232,
- Name: "testdata.x232",
- Tag: "bytes,232,opt,name=x232",
-}
-
-var E_X233 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 233,
- Name: "testdata.x233",
- Tag: "bytes,233,opt,name=x233",
-}
-
-var E_X234 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 234,
- Name: "testdata.x234",
- Tag: "bytes,234,opt,name=x234",
-}
-
-var E_X235 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 235,
- Name: "testdata.x235",
- Tag: "bytes,235,opt,name=x235",
-}
-
-var E_X236 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 236,
- Name: "testdata.x236",
- Tag: "bytes,236,opt,name=x236",
-}
-
-var E_X237 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 237,
- Name: "testdata.x237",
- Tag: "bytes,237,opt,name=x237",
-}
-
-var E_X238 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 238,
- Name: "testdata.x238",
- Tag: "bytes,238,opt,name=x238",
-}
-
-var E_X239 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 239,
- Name: "testdata.x239",
- Tag: "bytes,239,opt,name=x239",
-}
-
-var E_X240 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 240,
- Name: "testdata.x240",
- Tag: "bytes,240,opt,name=x240",
-}
-
-var E_X241 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 241,
- Name: "testdata.x241",
- Tag: "bytes,241,opt,name=x241",
-}
-
-var E_X242 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 242,
- Name: "testdata.x242",
- Tag: "bytes,242,opt,name=x242",
-}
-
-var E_X243 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 243,
- Name: "testdata.x243",
- Tag: "bytes,243,opt,name=x243",
-}
-
-var E_X244 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 244,
- Name: "testdata.x244",
- Tag: "bytes,244,opt,name=x244",
-}
-
-var E_X245 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 245,
- Name: "testdata.x245",
- Tag: "bytes,245,opt,name=x245",
-}
-
-var E_X246 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 246,
- Name: "testdata.x246",
- Tag: "bytes,246,opt,name=x246",
-}
-
-var E_X247 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 247,
- Name: "testdata.x247",
- Tag: "bytes,247,opt,name=x247",
-}
-
-var E_X248 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 248,
- Name: "testdata.x248",
- Tag: "bytes,248,opt,name=x248",
-}
-
-var E_X249 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 249,
- Name: "testdata.x249",
- Tag: "bytes,249,opt,name=x249",
-}
-
-var E_X250 = &proto.ExtensionDesc{
- ExtendedType: (*MyMessageSet)(nil),
- ExtensionType: (*Empty)(nil),
- Field: 250,
- Name: "testdata.x250",
- Tag: "bytes,250,opt,name=x250",
-}
-
-func init() {
- proto.RegisterType((*GoEnum)(nil), "testdata.GoEnum")
- proto.RegisterType((*GoTestField)(nil), "testdata.GoTestField")
- proto.RegisterType((*GoTest)(nil), "testdata.GoTest")
- proto.RegisterType((*GoTest_RequiredGroup)(nil), "testdata.GoTest.RequiredGroup")
- proto.RegisterType((*GoTest_RepeatedGroup)(nil), "testdata.GoTest.RepeatedGroup")
- proto.RegisterType((*GoTest_OptionalGroup)(nil), "testdata.GoTest.OptionalGroup")
- proto.RegisterType((*GoTestRequiredGroupField)(nil), "testdata.GoTestRequiredGroupField")
- proto.RegisterType((*GoTestRequiredGroupField_Group)(nil), "testdata.GoTestRequiredGroupField.Group")
- proto.RegisterType((*GoSkipTest)(nil), "testdata.GoSkipTest")
- proto.RegisterType((*GoSkipTest_SkipGroup)(nil), "testdata.GoSkipTest.SkipGroup")
- proto.RegisterType((*NonPackedTest)(nil), "testdata.NonPackedTest")
- proto.RegisterType((*PackedTest)(nil), "testdata.PackedTest")
- proto.RegisterType((*MaxTag)(nil), "testdata.MaxTag")
- proto.RegisterType((*OldMessage)(nil), "testdata.OldMessage")
- proto.RegisterType((*OldMessage_Nested)(nil), "testdata.OldMessage.Nested")
- proto.RegisterType((*NewMessage)(nil), "testdata.NewMessage")
- proto.RegisterType((*NewMessage_Nested)(nil), "testdata.NewMessage.Nested")
- proto.RegisterType((*InnerMessage)(nil), "testdata.InnerMessage")
- proto.RegisterType((*OtherMessage)(nil), "testdata.OtherMessage")
- proto.RegisterType((*RequiredInnerMessage)(nil), "testdata.RequiredInnerMessage")
- proto.RegisterType((*MyMessage)(nil), "testdata.MyMessage")
- proto.RegisterType((*MyMessage_SomeGroup)(nil), "testdata.MyMessage.SomeGroup")
- proto.RegisterType((*Ext)(nil), "testdata.Ext")
- proto.RegisterType((*ComplexExtension)(nil), "testdata.ComplexExtension")
- proto.RegisterType((*DefaultsMessage)(nil), "testdata.DefaultsMessage")
- proto.RegisterType((*MyMessageSet)(nil), "testdata.MyMessageSet")
- proto.RegisterType((*Empty)(nil), "testdata.Empty")
- proto.RegisterType((*MessageList)(nil), "testdata.MessageList")
- proto.RegisterType((*MessageList_Message)(nil), "testdata.MessageList.Message")
- proto.RegisterType((*Strings)(nil), "testdata.Strings")
- proto.RegisterType((*Defaults)(nil), "testdata.Defaults")
- proto.RegisterType((*SubDefaults)(nil), "testdata.SubDefaults")
- proto.RegisterType((*RepeatedEnum)(nil), "testdata.RepeatedEnum")
- proto.RegisterType((*MoreRepeated)(nil), "testdata.MoreRepeated")
- proto.RegisterType((*GroupOld)(nil), "testdata.GroupOld")
- proto.RegisterType((*GroupOld_G)(nil), "testdata.GroupOld.G")
- proto.RegisterType((*GroupNew)(nil), "testdata.GroupNew")
- proto.RegisterType((*GroupNew_G)(nil), "testdata.GroupNew.G")
- proto.RegisterType((*FloatingPoint)(nil), "testdata.FloatingPoint")
- proto.RegisterType((*MessageWithMap)(nil), "testdata.MessageWithMap")
- proto.RegisterType((*Oneof)(nil), "testdata.Oneof")
- proto.RegisterType((*Oneof_F_Group)(nil), "testdata.Oneof.F_Group")
- proto.RegisterType((*Communique)(nil), "testdata.Communique")
- proto.RegisterEnum("testdata.FOO", FOO_name, FOO_value)
- proto.RegisterEnum("testdata.GoTest_KIND", GoTest_KIND_name, GoTest_KIND_value)
- proto.RegisterEnum("testdata.MyMessage_Color", MyMessage_Color_name, MyMessage_Color_value)
- proto.RegisterEnum("testdata.DefaultsMessage_DefaultsEnum", DefaultsMessage_DefaultsEnum_name, DefaultsMessage_DefaultsEnum_value)
- proto.RegisterEnum("testdata.Defaults_Color", Defaults_Color_name, Defaults_Color_value)
- proto.RegisterEnum("testdata.RepeatedEnum_Color", RepeatedEnum_Color_name, RepeatedEnum_Color_value)
- proto.RegisterExtension(E_Ext_More)
- proto.RegisterExtension(E_Ext_Text)
- proto.RegisterExtension(E_Ext_Number)
- proto.RegisterExtension(E_Greeting)
- proto.RegisterExtension(E_Complex)
- proto.RegisterExtension(E_RComplex)
- proto.RegisterExtension(E_NoDefaultDouble)
- proto.RegisterExtension(E_NoDefaultFloat)
- proto.RegisterExtension(E_NoDefaultInt32)
- proto.RegisterExtension(E_NoDefaultInt64)
- proto.RegisterExtension(E_NoDefaultUint32)
- proto.RegisterExtension(E_NoDefaultUint64)
- proto.RegisterExtension(E_NoDefaultSint32)
- proto.RegisterExtension(E_NoDefaultSint64)
- proto.RegisterExtension(E_NoDefaultFixed32)
- proto.RegisterExtension(E_NoDefaultFixed64)
- proto.RegisterExtension(E_NoDefaultSfixed32)
- proto.RegisterExtension(E_NoDefaultSfixed64)
- proto.RegisterExtension(E_NoDefaultBool)
- proto.RegisterExtension(E_NoDefaultString)
- proto.RegisterExtension(E_NoDefaultBytes)
- proto.RegisterExtension(E_NoDefaultEnum)
- proto.RegisterExtension(E_DefaultDouble)
- proto.RegisterExtension(E_DefaultFloat)
- proto.RegisterExtension(E_DefaultInt32)
- proto.RegisterExtension(E_DefaultInt64)
- proto.RegisterExtension(E_DefaultUint32)
- proto.RegisterExtension(E_DefaultUint64)
- proto.RegisterExtension(E_DefaultSint32)
- proto.RegisterExtension(E_DefaultSint64)
- proto.RegisterExtension(E_DefaultFixed32)
- proto.RegisterExtension(E_DefaultFixed64)
- proto.RegisterExtension(E_DefaultSfixed32)
- proto.RegisterExtension(E_DefaultSfixed64)
- proto.RegisterExtension(E_DefaultBool)
- proto.RegisterExtension(E_DefaultString)
- proto.RegisterExtension(E_DefaultBytes)
- proto.RegisterExtension(E_DefaultEnum)
- proto.RegisterExtension(E_X201)
- proto.RegisterExtension(E_X202)
- proto.RegisterExtension(E_X203)
- proto.RegisterExtension(E_X204)
- proto.RegisterExtension(E_X205)
- proto.RegisterExtension(E_X206)
- proto.RegisterExtension(E_X207)
- proto.RegisterExtension(E_X208)
- proto.RegisterExtension(E_X209)
- proto.RegisterExtension(E_X210)
- proto.RegisterExtension(E_X211)
- proto.RegisterExtension(E_X212)
- proto.RegisterExtension(E_X213)
- proto.RegisterExtension(E_X214)
- proto.RegisterExtension(E_X215)
- proto.RegisterExtension(E_X216)
- proto.RegisterExtension(E_X217)
- proto.RegisterExtension(E_X218)
- proto.RegisterExtension(E_X219)
- proto.RegisterExtension(E_X220)
- proto.RegisterExtension(E_X221)
- proto.RegisterExtension(E_X222)
- proto.RegisterExtension(E_X223)
- proto.RegisterExtension(E_X224)
- proto.RegisterExtension(E_X225)
- proto.RegisterExtension(E_X226)
- proto.RegisterExtension(E_X227)
- proto.RegisterExtension(E_X228)
- proto.RegisterExtension(E_X229)
- proto.RegisterExtension(E_X230)
- proto.RegisterExtension(E_X231)
- proto.RegisterExtension(E_X232)
- proto.RegisterExtension(E_X233)
- proto.RegisterExtension(E_X234)
- proto.RegisterExtension(E_X235)
- proto.RegisterExtension(E_X236)
- proto.RegisterExtension(E_X237)
- proto.RegisterExtension(E_X238)
- proto.RegisterExtension(E_X239)
- proto.RegisterExtension(E_X240)
- proto.RegisterExtension(E_X241)
- proto.RegisterExtension(E_X242)
- proto.RegisterExtension(E_X243)
- proto.RegisterExtension(E_X244)
- proto.RegisterExtension(E_X245)
- proto.RegisterExtension(E_X246)
- proto.RegisterExtension(E_X247)
- proto.RegisterExtension(E_X248)
- proto.RegisterExtension(E_X249)
- proto.RegisterExtension(E_X250)
-}
-
-func init() { proto.RegisterFile("test.proto", fileDescriptor0) }
-
-var fileDescriptor0 = []byte{
- // 4465 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x94, 0x5a, 0xc9, 0x77, 0xdb, 0x48,
- 0x7a, 0x37, 0xc0, 0xfd, 0x23, 0x25, 0x42, 0x65, 0xb5, 0x9b, 0x96, 0xbc, 0xc0, 0x9c, 0xe9, 0x6e,
- 0x7a, 0xd3, 0x48, 0x20, 0x44, 0xdb, 0x74, 0xa7, 0xdf, 0xf3, 0x42, 0xc9, 0x7a, 0x63, 0x89, 0x0a,
- 0xa4, 0xee, 0x7e, 0xd3, 0x39, 0xf0, 0x51, 0x22, 0x48, 0xb3, 0x4d, 0x02, 0x34, 0x09, 0xc5, 0x52,
- 0x72, 0xe9, 0x4b, 0x72, 0xcd, 0x76, 0xc9, 0x35, 0xa7, 0x9c, 0x92, 0xbc, 0x97, 0x7f, 0x22, 0xe9,
- 0xee, 0x59, 0x7b, 0xd6, 0xac, 0x93, 0x7d, 0x99, 0xec, 0xdb, 0x4c, 0x92, 0x4b, 0xcf, 0xab, 0xaf,
- 0x0a, 0x40, 0x01, 0x24, 0x20, 0xf9, 0x24, 0x56, 0xd5, 0xef, 0xf7, 0xd5, 0xf6, 0xab, 0xef, 0xab,
- 0xaf, 0x20, 0x00, 0xc7, 0x9c, 0x38, 0x2b, 0xa3, 0xb1, 0xed, 0xd8, 0x24, 0x4b, 0x7f, 0x77, 0xda,
- 0x4e, 0xbb, 0x7c, 0x1d, 0xd2, 0x9b, 0x76, 0xc3, 0x3a, 0x1a, 0x92, 0xab, 0x90, 0xe8, 0xda, 0x76,
- 0x49, 0x52, 0xe5, 0xca, 0xbc, 0x36, 0xb7, 0xe2, 0x22, 0x56, 0x36, 0x9a, 0x4d, 0x83, 0xb6, 0x94,
- 0xef, 0x40, 0x7e, 0xd3, 0xde, 0x37, 0x27, 0xce, 0x46, 0xdf, 0x1c, 0x74, 0xc8, 0x22, 0xa4, 0x9e,
- 0xb6, 0x0f, 0xcc, 0x01, 0x32, 0x72, 0x46, 0x6a, 0x40, 0x0b, 0x84, 0x40, 0x72, 0xff, 0x64, 0x64,
- 0x96, 0x64, 0xac, 0x4c, 0x3a, 0x27, 0x23, 0xb3, 0xfc, 0x2b, 0x57, 0x68, 0x27, 0x94, 0x49, 0xae,
- 0x43, 0xf2, 0xcb, 0x7d, 0xab, 0xc3, 0x7b, 0x79, 0xcd, 0xef, 0x85, 0xb5, 0xaf, 0x7c, 0x79, 0x6b,
- 0xe7, 0xb1, 0x91, 0x7c, 0xde, 0xb7, 0xd0, 0xfe, 0x7e, 0xfb, 0x60, 0x40, 0x4d, 0x49, 0xd4, 0xbe,
- 0x43, 0x0b, 0xb4, 0x76, 0xb7, 0x3d, 0x6e, 0x0f, 0x4b, 0x09, 0x55, 0xaa, 0xa4, 0x8c, 0xd4, 0x88,
- 0x16, 0xc8, 0x7d, 0x98, 0x33, 0xcc, 0x17, 0x47, 0xfd, 0xb1, 0xd9, 0xc1, 0xc1, 0x95, 0x92, 0xaa,
- 0x5c, 0xc9, 0x4f, 0xdb, 0xc7, 0x46, 0x63, 0x6e, 0x2c, 0x62, 0x19, 0x79, 0x64, 0xb6, 0x1d, 0x97,
- 0x9c, 0x52, 0x13, 0xb1, 0x64, 0x01, 0x4b, 0xc9, 0xcd, 0x91, 0xd3, 0xb7, 0xad, 0xf6, 0x80, 0x91,
- 0xd3, 0xaa, 0x14, 0x43, 0xb6, 0x45, 0x2c, 0x79, 0x13, 0x8a, 0x1b, 0xad, 0x87, 0xb6, 0x3d, 0x68,
- 0xb9, 0x23, 0x2a, 0x81, 0x2a, 0x57, 0xb2, 0xc6, 0x5c, 0x97, 0xd6, 0xba, 0x53, 0x22, 0x15, 0x50,
- 0x36, 0x5a, 0x5b, 0x96, 0x53, 0xd5, 0x7c, 0x60, 0x5e, 0x95, 0x2b, 0x29, 0x63, 0xbe, 0x8b, 0xd5,
- 0x53, 0xc8, 0x9a, 0xee, 0x23, 0x0b, 0xaa, 0x5c, 0x49, 0x30, 0x64, 0x4d, 0xf7, 0x90, 0xb7, 0x80,
- 0x6c, 0xb4, 0x36, 0xfa, 0xc7, 0x66, 0x47, 0xb4, 0x3a, 0xa7, 0xca, 0x95, 0x8c, 0xa1, 0x74, 0x79,
- 0xc3, 0x0c, 0xb4, 0x68, 0x79, 0x5e, 0x95, 0x2b, 0x69, 0x17, 0x2d, 0xd8, 0xbe, 0x01, 0x0b, 0x1b,
- 0xad, 0x77, 0xfb, 0xc1, 0x01, 0x17, 0x55, 0xb9, 0x32, 0x67, 0x14, 0xbb, 0xac, 0x7e, 0x1a, 0x2b,
- 0x1a, 0x56, 0x54, 0xb9, 0x92, 0xe4, 0x58, 0xc1, 0x2e, 0xce, 0x6e, 0x63, 0x60, 0xb7, 0x1d, 0x1f,
- 0xba, 0xa0, 0xca, 0x15, 0xd9, 0x98, 0xef, 0x62, 0x75, 0xd0, 0xea, 0x63, 0xfb, 0xe8, 0x60, 0x60,
- 0xfa, 0x50, 0xa2, 0xca, 0x15, 0xc9, 0x28, 0x76, 0x59, 0x7d, 0x10, 0xbb, 0xe7, 0x8c, 0xfb, 0x56,
- 0xcf, 0xc7, 0x9e, 0x47, 0xfd, 0x16, 0xbb, 0xac, 0x3e, 0x38, 0x82, 0x87, 0x27, 0x8e, 0x39, 0xf1,
- 0xa1, 0xa6, 0x2a, 0x57, 0x0a, 0xc6, 0x7c, 0x17, 0xab, 0x43, 0x56, 0x43, 0x6b, 0xd0, 0x55, 0xe5,
- 0xca, 0x02, 0xb5, 0x3a, 0x63, 0x0d, 0xf6, 0x42, 0x6b, 0xd0, 0x53, 0xe5, 0x0a, 0xe1, 0x58, 0x61,
- 0x0d, 0x44, 0xcd, 0x30, 0x21, 0x96, 0x16, 0xd5, 0x84, 0xa0, 0x19, 0x56, 0x19, 0xd4, 0x0c, 0x07,
- 0xbe, 0xa6, 0x26, 0x44, 0xcd, 0x84, 0x90, 0xd8, 0x39, 0x47, 0x5e, 0x50, 0x13, 0xa2, 0x66, 0x38,
- 0x32, 0xa4, 0x19, 0x8e, 0x7d, 0x5d, 0x4d, 0x04, 0x35, 0x33, 0x85, 0x16, 0x2d, 0x97, 0xd4, 0x44,
- 0x50, 0x33, 0x1c, 0x1d, 0xd4, 0x0c, 0x07, 0x5f, 0x54, 0x13, 0x01, 0xcd, 0x84, 0xb1, 0xa2, 0xe1,
- 0x25, 0x35, 0x11, 0xd0, 0x8c, 0x38, 0x3b, 0x57, 0x33, 0x1c, 0xba, 0xac, 0x26, 0x44, 0xcd, 0x88,
- 0x56, 0x3d, 0xcd, 0x70, 0xe8, 0x25, 0x35, 0x11, 0xd0, 0x8c, 0x88, 0xf5, 0x34, 0xc3, 0xb1, 0x97,
- 0xd5, 0x44, 0x40, 0x33, 0x1c, 0x7b, 0x5d, 0xd4, 0x0c, 0x87, 0x7e, 0x2c, 0xa9, 0x09, 0x51, 0x34,
- 0x1c, 0x7a, 0x33, 0x20, 0x1a, 0x8e, 0xfd, 0x84, 0x62, 0x45, 0xd5, 0x84, 0xc1, 0xe2, 0x2a, 0x7c,
- 0x4a, 0xc1, 0xa2, 0x6c, 0x38, 0xd8, 0x97, 0x8d, 0xeb, 0x82, 0x4a, 0x57, 0x54, 0xc9, 0x93, 0x8d,
- 0xeb, 0xc3, 0x44, 0xd9, 0x78, 0xc0, 0xab, 0xe8, 0x6a, 0xb9, 0x6c, 0xa6, 0x90, 0x35, 0xdd, 0x47,
- 0xaa, 0xaa, 0xe4, 0xcb, 0xc6, 0x43, 0x06, 0x64, 0xe3, 0x61, 0xaf, 0xa9, 0x92, 0x28, 0x9b, 0x19,
- 0x68, 0xd1, 0x72, 0x59, 0x95, 0x44, 0xd9, 0x78, 0x68, 0x51, 0x36, 0x1e, 0xf8, 0x0b, 0xaa, 0x24,
- 0xc8, 0x66, 0x1a, 0x2b, 0x1a, 0xfe, 0xa2, 0x2a, 0x09, 0xb2, 0x09, 0xce, 0x8e, 0xc9, 0xc6, 0x83,
- 0xbe, 0xa1, 0x4a, 0xbe, 0x6c, 0x82, 0x56, 0xb9, 0x6c, 0x3c, 0xe8, 0x9b, 0xaa, 0x24, 0xc8, 0x26,
- 0x88, 0xe5, 0xb2, 0xf1, 0xb0, 0x6f, 0x61, 0x7c, 0x73, 0x65, 0xe3, 0x61, 0x05, 0xd9, 0x78, 0xd0,
- 0xdf, 0xa1, 0xb1, 0xd0, 0x93, 0x8d, 0x07, 0x15, 0x65, 0xe3, 0x61, 0x7f, 0x97, 0x62, 0x7d, 0xd9,
- 0x4c, 0x83, 0xc5, 0x55, 0xf8, 0x3d, 0x0a, 0xf6, 0x65, 0xe3, 0x81, 0x57, 0x70, 0x10, 0x54, 0x36,
- 0x1d, 0xb3, 0xdb, 0x3e, 0x1a, 0x50, 0x89, 0x55, 0xa8, 0x6e, 0xea, 0x49, 0x67, 0x7c, 0x64, 0xd2,
- 0x91, 0xd8, 0xf6, 0xe0, 0xb1, 0xdb, 0x46, 0x56, 0xa8, 0x71, 0x26, 0x1f, 0x9f, 0x70, 0x9d, 0xea,
- 0xa7, 0x2e, 0x57, 0x35, 0xa3, 0xc8, 0x34, 0x34, 0x8d, 0xaf, 0xe9, 0x02, 0xfe, 0x06, 0x55, 0x51,
- 0x5d, 0xae, 0xe9, 0x0c, 0x5f, 0xd3, 0x7d, 0x7c, 0x15, 0xce, 0xfb, 0x52, 0xf2, 0x19, 0x37, 0xa9,
- 0x96, 0xea, 0x89, 0xaa, 0xb6, 0x6a, 0x2c, 0xb8, 0x82, 0x9a, 0x45, 0x0a, 0x74, 0x73, 0x8b, 0x4a,
- 0xaa, 0x9e, 0xa8, 0xe9, 0x1e, 0x49, 0xec, 0x49, 0xa3, 0x32, 0xe4, 0xc2, 0xf2, 0x39, 0xb7, 0xa9,
- 0xb2, 0xea, 0xc9, 0xaa, 0xb6, 0xba, 0x6a, 0x28, 0x5c, 0x5f, 0x33, 0x38, 0x81, 0x7e, 0x56, 0xa8,
- 0xc2, 0xea, 0xc9, 0x9a, 0xee, 0x71, 0x82, 0xfd, 0x2c, 0xb8, 0x42, 0xf3, 0x29, 0x5f, 0xa2, 0x4a,
- 0xab, 0xa7, 0xab, 0x6b, 0xfa, 0xda, 0xfa, 0x3d, 0xa3, 0xc8, 0x14, 0xe7, 0x73, 0x74, 0xda, 0x0f,
- 0x97, 0x9c, 0x4f, 0x5a, 0xa5, 0x9a, 0xab, 0xa7, 0xb5, 0x3b, 0x6b, 0x77, 0xb5, 0xbb, 0x86, 0xc2,
- 0xb5, 0xe7, 0xb3, 0xde, 0xa1, 0x2c, 0x2e, 0x3e, 0x9f, 0xb5, 0x46, 0xd5, 0x57, 0x57, 0x9e, 0x99,
- 0x83, 0x81, 0x7d, 0x4b, 0x2d, 0xbf, 0xb4, 0xc7, 0x83, 0xce, 0xb5, 0x32, 0x18, 0x0a, 0xd7, 0xa3,
- 0xd8, 0xeb, 0x82, 0x2b, 0x48, 0x9f, 0xfe, 0x6b, 0xf4, 0x1e, 0x56, 0xa8, 0x67, 0x1e, 0xf6, 0x7b,
- 0x96, 0x3d, 0x31, 0x8d, 0x22, 0x93, 0x66, 0x68, 0x4d, 0xf6, 0xc2, 0xeb, 0xf8, 0xeb, 0x94, 0xb6,
- 0x50, 0x4f, 0xdc, 0xae, 0x6a, 0xb4, 0xa7, 0x59, 0xeb, 0xb8, 0x17, 0x5e, 0xc7, 0xdf, 0xa0, 0x1c,
- 0x52, 0x4f, 0xdc, 0xae, 0xe9, 0x9c, 0x23, 0xae, 0xe3, 0x1d, 0xb8, 0x10, 0x8a, 0x8b, 0xad, 0x51,
- 0xfb, 0xf0, 0xb9, 0xd9, 0x29, 0x69, 0x34, 0x3c, 0x3e, 0x94, 0x15, 0xc9, 0x38, 0x1f, 0x08, 0x91,
- 0xbb, 0xd8, 0x4c, 0xee, 0xc1, 0xeb, 0xe1, 0x40, 0xe9, 0x32, 0xab, 0x34, 0x5e, 0x22, 0x73, 0x31,
- 0x18, 0x33, 0x43, 0x54, 0xc1, 0x01, 0xbb, 0x54, 0x9d, 0x06, 0x50, 0x9f, 0xea, 0x7b, 0x62, 0x4e,
- 0xfd, 0x19, 0xb8, 0x38, 0x1d, 0x4a, 0x5d, 0xf2, 0x3a, 0x8d, 0xa8, 0x48, 0xbe, 0x10, 0x8e, 0xaa,
- 0x53, 0xf4, 0x19, 0x7d, 0xd7, 0x68, 0x88, 0x15, 0xe9, 0x53, 0xbd, 0xdf, 0x87, 0xd2, 0x54, 0xb0,
- 0x75, 0xd9, 0x77, 0x68, 0xcc, 0x45, 0xf6, 0x6b, 0xa1, 0xb8, 0x1b, 0x26, 0xcf, 0xe8, 0xfa, 0x2e,
- 0x0d, 0xc2, 0x02, 0x79, 0xaa, 0x67, 0x5c, 0xb2, 0x60, 0x38, 0x76, 0xb9, 0xf7, 0x68, 0x54, 0xe6,
- 0x4b, 0x16, 0x88, 0xcc, 0x62, 0xbf, 0xa1, 0xf8, 0xec, 0x72, 0xeb, 0x34, 0x4c, 0xf3, 0x7e, 0x83,
- 0xa1, 0x9a, 0x93, 0xdf, 0xa6, 0xe4, 0xbd, 0xd9, 0x33, 0xfe, 0x71, 0x82, 0x06, 0x58, 0xce, 0xde,
- 0x9b, 0x35, 0x65, 0x8f, 0x3d, 0x63, 0xca, 0x3f, 0xa1, 0x6c, 0x22, 0xb0, 0xa7, 0xe6, 0xfc, 0x18,
- 0xbc, 0x8c, 0xa3, 0x37, 0xb6, 0x8f, 0x46, 0xa5, 0x0d, 0x55, 0xae, 0x80, 0x76, 0x65, 0x2a, 0xfb,
- 0x71, 0x2f, 0x79, 0x9b, 0x14, 0x65, 0x04, 0x49, 0xcc, 0x0a, 0xb3, 0xcb, 0xac, 0xec, 0xaa, 0x89,
- 0x08, 0x2b, 0x0c, 0xe5, 0x59, 0x11, 0x48, 0xd4, 0x8a, 0xeb, 0xf4, 0x99, 0x95, 0x0f, 0x54, 0x69,
- 0xa6, 0x15, 0x37, 0x04, 0x70, 0x2b, 0x01, 0xd2, 0xd2, 0xba, 0x9f, 0x6f, 0x61, 0x3b, 0xf9, 0x62,
- 0x38, 0x01, 0xdb, 0xc4, 0xfb, 0x73, 0x30, 0xd3, 0x62, 0x34, 0x61, 0x70, 0xd3, 0xb4, 0x9f, 0x8d,
- 0xa0, 0x05, 0x46, 0x33, 0x4d, 0xfb, 0xb9, 0x19, 0xb4, 0xf2, 0x6f, 0x4a, 0x90, 0xa4, 0xf9, 0x24,
- 0xc9, 0x42, 0xf2, 0xbd, 0xe6, 0xd6, 0x63, 0xe5, 0x1c, 0xfd, 0xf5, 0xb0, 0xd9, 0x7c, 0xaa, 0x48,
- 0x24, 0x07, 0xa9, 0x87, 0x5f, 0xd9, 0x6f, 0xec, 0x29, 0x32, 0x29, 0x42, 0x7e, 0x63, 0x6b, 0x67,
- 0xb3, 0x61, 0xec, 0x1a, 0x5b, 0x3b, 0xfb, 0x4a, 0x82, 0xb6, 0x6d, 0x3c, 0x6d, 0x3e, 0xd8, 0x57,
- 0x92, 0x24, 0x03, 0x09, 0x5a, 0x97, 0x22, 0x00, 0xe9, 0xbd, 0x7d, 0x63, 0x6b, 0x67, 0x53, 0x49,
- 0x53, 0x2b, 0xfb, 0x5b, 0xdb, 0x0d, 0x25, 0x43, 0x91, 0xfb, 0xef, 0xee, 0x3e, 0x6d, 0x28, 0x59,
- 0xfa, 0xf3, 0x81, 0x61, 0x3c, 0xf8, 0x8a, 0x92, 0xa3, 0xa4, 0xed, 0x07, 0xbb, 0x0a, 0x60, 0xf3,
- 0x83, 0x87, 0x4f, 0x1b, 0x4a, 0x9e, 0x14, 0x20, 0xbb, 0xf1, 0xee, 0xce, 0xa3, 0xfd, 0xad, 0xe6,
- 0x8e, 0x52, 0x28, 0x9f, 0x40, 0x89, 0x2d, 0x73, 0x60, 0x15, 0x59, 0x52, 0xf8, 0x0e, 0xa4, 0xd8,
- 0xce, 0x48, 0xa8, 0x92, 0x4a, 0x78, 0x67, 0xa6, 0x29, 0x2b, 0x6c, 0x8f, 0x18, 0x6d, 0xe9, 0x32,
- 0xa4, 0xd8, 0x2a, 0x2d, 0x42, 0x8a, 0xad, 0x8e, 0x8c, 0xa9, 0x62, 0xaa, 0x8b, 0xab, 0xf2, 0x5b,
- 0x32, 0xc0, 0xa6, 0xbd, 0xf7, 0xbc, 0x3f, 0xc2, 0x84, 0xfc, 0x32, 0xc0, 0xe4, 0x79, 0x7f, 0xd4,
- 0x42, 0xd5, 0xf3, 0xa4, 0x32, 0x47, 0x6b, 0xd0, 0xdf, 0x91, 0x6b, 0x50, 0xc0, 0xe6, 0x2e, 0xf3,
- 0x42, 0x98, 0x4b, 0x66, 0x8c, 0x3c, 0xad, 0xe3, 0x8e, 0x29, 0x08, 0xa9, 0xe9, 0x98, 0x42, 0xa6,
- 0x05, 0x48, 0x4d, 0x27, 0x57, 0x01, 0x8b, 0xad, 0x09, 0x46, 0x14, 0x4c, 0x1b, 0x73, 0x06, 0xf6,
- 0xcb, 0x62, 0x0c, 0x79, 0x1b, 0xb0, 0x4f, 0x36, 0xef, 0xe2, 0xf4, 0xe9, 0x70, 0x87, 0xbb, 0x42,
- 0x7f, 0xb0, 0xd9, 0xfa, 0x84, 0xa5, 0x26, 0xe4, 0xbc, 0x7a, 0xda, 0x17, 0xd6, 0xf2, 0x19, 0x29,
- 0x38, 0x23, 0xc0, 0x2a, 0x6f, 0x4a, 0x0c, 0xc0, 0x47, 0xb3, 0x80, 0xa3, 0x61, 0x24, 0x36, 0x9c,
- 0xf2, 0x65, 0x98, 0xdb, 0xb1, 0x2d, 0x76, 0x7a, 0x71, 0x95, 0x0a, 0x20, 0xb5, 0x4b, 0x12, 0x66,
- 0x4f, 0x52, 0xbb, 0x7c, 0x05, 0x40, 0x68, 0x53, 0x40, 0x3a, 0x60, 0x6d, 0xe8, 0x03, 0xa4, 0x83,
- 0xf2, 0x4d, 0x48, 0x6f, 0xb7, 0x8f, 0xf7, 0xdb, 0x3d, 0x72, 0x0d, 0x60, 0xd0, 0x9e, 0x38, 0x2d,
- 0x5c, 0xfa, 0xd2, 0xe7, 0x9f, 0x7f, 0xfe, 0xb9, 0x84, 0x97, 0xbd, 0x1c, 0xad, 0x65, 0x2a, 0x7d,
- 0x01, 0xd0, 0x1c, 0x74, 0xb6, 0xcd, 0xc9, 0xa4, 0xdd, 0x33, 0x49, 0x15, 0xd2, 0x96, 0x39, 0xa1,
- 0xd1, 0x4e, 0xc2, 0x77, 0x84, 0x65, 0x7f, 0x15, 0x7c, 0xd4, 0xca, 0x0e, 0x42, 0x0c, 0x0e, 0x25,
- 0x0a, 0x24, 0xac, 0xa3, 0x21, 0xbe, 0x93, 0xa4, 0x0c, 0xfa, 0x73, 0xe9, 0x12, 0xa4, 0x19, 0x86,
- 0x10, 0x48, 0x5a, 0xed, 0xa1, 0x59, 0x62, 0xfd, 0xe2, 0xef, 0xf2, 0xaf, 0x4a, 0x00, 0x3b, 0xe6,
- 0xcb, 0x33, 0xf4, 0xe9, 0xa3, 0x62, 0xfa, 0x4c, 0xb0, 0x3e, 0xef, 0xc7, 0xf5, 0x49, 0x75, 0xd6,
- 0xb5, 0xed, 0x4e, 0x8b, 0x6d, 0x31, 0x7b, 0xd2, 0xc9, 0xd1, 0x1a, 0xdc, 0xb5, 0xf2, 0x07, 0x50,
- 0xd8, 0xb2, 0x2c, 0x73, 0xec, 0x8e, 0x89, 0x40, 0xf2, 0x99, 0x3d, 0x71, 0xf8, 0xdb, 0x12, 0xfe,
- 0x26, 0x25, 0x48, 0x8e, 0xec, 0xb1, 0xc3, 0xe6, 0x59, 0x4f, 0xea, 0xab, 0xab, 0xab, 0x06, 0xd6,
- 0x90, 0x4b, 0x90, 0x3b, 0xb4, 0x2d, 0xcb, 0x3c, 0xa4, 0x93, 0x48, 0x60, 0x5a, 0xe3, 0x57, 0x94,
- 0x7f, 0x59, 0x82, 0x42, 0xd3, 0x79, 0xe6, 0x1b, 0x57, 0x20, 0xf1, 0xdc, 0x3c, 0xc1, 0xe1, 0x25,
- 0x0c, 0xfa, 0x93, 0x1e, 0x95, 0x9f, 0x6f, 0x0f, 0x8e, 0xd8, 0x5b, 0x53, 0xc1, 0x60, 0x05, 0x72,
- 0x01, 0xd2, 0x2f, 0xcd, 0x7e, 0xef, 0x99, 0x83, 0x36, 0x65, 0x83, 0x97, 0xc8, 0x2d, 0x48, 0xf5,
- 0xe9, 0x60, 0x4b, 0x49, 0x5c, 0xaf, 0x0b, 0xfe, 0x7a, 0x89, 0x73, 0x30, 0x18, 0xe8, 0x46, 0x36,
- 0xdb, 0x51, 0x3e, 0xfa, 0xe8, 0xa3, 0x8f, 0xe4, 0x72, 0x17, 0x16, 0xdd, 0xc3, 0x1b, 0x98, 0xec,
- 0x0e, 0x94, 0x06, 0xa6, 0xdd, 0xea, 0xf6, 0xad, 0xf6, 0x60, 0x70, 0xd2, 0x7a, 0x69, 0x5b, 0xad,
- 0xb6, 0xd5, 0xb2, 0x27, 0x87, 0xed, 0x31, 0x2e, 0x40, 0x74, 0x17, 0x8b, 0x03, 0xd3, 0xde, 0x60,
- 0xb4, 0xf7, 0x6d, 0xeb, 0x81, 0xd5, 0xa4, 0x9c, 0xf2, 0x1f, 0x24, 0x21, 0xb7, 0x7d, 0xe2, 0x5a,
- 0x5f, 0x84, 0xd4, 0xa1, 0x7d, 0x64, 0xb1, 0xb5, 0x4c, 0x19, 0xac, 0xe0, 0xed, 0x91, 0x2c, 0xec,
- 0xd1, 0x22, 0xa4, 0x5e, 0x1c, 0xd9, 0x8e, 0x89, 0xd3, 0xcd, 0x19, 0xac, 0x40, 0x57, 0x6b, 0x64,
- 0x3a, 0xa5, 0x24, 0x26, 0xb7, 0xf4, 0xa7, 0x3f, 0xff, 0xd4, 0x19, 0xe6, 0x4f, 0x56, 0x20, 0x6d,
- 0xd3, 0xd5, 0x9f, 0x94, 0xd2, 0xf8, 0xae, 0x26, 0xc0, 0xc5, 0x5d, 0x31, 0x38, 0x8a, 0x6c, 0xc1,
- 0xc2, 0x4b, 0xb3, 0x35, 0x3c, 0x9a, 0x38, 0xad, 0x9e, 0xdd, 0xea, 0x98, 0xe6, 0xc8, 0x1c, 0x97,
- 0xe6, 0xb0, 0x27, 0xc1, 0x27, 0xcc, 0x5a, 0x48, 0x63, 0xfe, 0xa5, 0xb9, 0x7d, 0x34, 0x71, 0x36,
- 0xed, 0xc7, 0xc8, 0x22, 0x55, 0xc8, 0x8d, 0x4d, 0xea, 0x09, 0xe8, 0x60, 0x0b, 0xe1, 0xde, 0x03,
- 0xd4, 0xec, 0xd8, 0x1c, 0x61, 0x05, 0x59, 0x87, 0xec, 0x41, 0xff, 0xb9, 0x39, 0x79, 0x66, 0x76,
- 0x4a, 0x19, 0x55, 0xaa, 0xcc, 0x6b, 0x17, 0x7d, 0x8e, 0xb7, 0xac, 0x2b, 0x8f, 0xec, 0x81, 0x3d,
- 0x36, 0x3c, 0x28, 0xb9, 0x0f, 0xb9, 0x89, 0x3d, 0x34, 0x99, 0xbe, 0xb3, 0x18, 0x54, 0x2f, 0xcf,
- 0xe2, 0xed, 0xd9, 0x43, 0xd3, 0xf5, 0x60, 0x2e, 0x9e, 0x2c, 0xb3, 0x81, 0x1e, 0xd0, 0xab, 0x73,
- 0x09, 0xf0, 0x69, 0x80, 0x0e, 0x08, 0xaf, 0xd2, 0x64, 0x89, 0x0e, 0xa8, 0xd7, 0xa5, 0x37, 0xa2,
- 0x52, 0x1e, 0xf3, 0x4a, 0xaf, 0xbc, 0x74, 0x0b, 0x72, 0x9e, 0x41, 0xdf, 0xf5, 0x31, 0x77, 0x93,
- 0x43, 0x7f, 0xc0, 0x5c, 0x1f, 0xf3, 0x35, 0x6f, 0x40, 0x0a, 0x87, 0x4d, 0x23, 0x94, 0xd1, 0xa0,
- 0x01, 0x31, 0x07, 0xa9, 0x4d, 0xa3, 0xd1, 0xd8, 0x51, 0x24, 0x8c, 0x8d, 0x4f, 0xdf, 0x6d, 0x28,
- 0xb2, 0xa0, 0xd8, 0xdf, 0x96, 0x20, 0xd1, 0x38, 0x46, 0xb5, 0xd0, 0x69, 0xb8, 0x27, 0x9a, 0xfe,
- 0xd6, 0x6a, 0x90, 0x1c, 0xda, 0x63, 0x93, 0x9c, 0x9f, 0x31, 0xcb, 0x52, 0x0f, 0xf7, 0x4b, 0x78,
- 0x45, 0x6e, 0x1c, 0x3b, 0x06, 0xe2, 0xb5, 0xb7, 0x20, 0xe9, 0x98, 0xc7, 0xce, 0x6c, 0xde, 0x33,
- 0xd6, 0x01, 0x05, 0x68, 0x37, 0x21, 0x6d, 0x1d, 0x0d, 0x0f, 0xcc, 0xf1, 0x6c, 0x68, 0x1f, 0xa7,
- 0xc7, 0x21, 0xe5, 0xf7, 0x40, 0x79, 0x64, 0x0f, 0x47, 0x03, 0xf3, 0xb8, 0x71, 0xec, 0x98, 0xd6,
- 0xa4, 0x6f, 0x5b, 0x54, 0xcf, 0xdd, 0xfe, 0x18, 0xbd, 0x88, 0xc4, 0x02, 0xe0, 0x78, 0xe2, 0xd0,
- 0x53, 0x3d, 0x31, 0x0f, 0x6d, 0xab, 0xc3, 0x1d, 0x26, 0x2f, 0x51, 0xb4, 0xf3, 0xac, 0x3f, 0xa6,
- 0x0e, 0x84, 0xfa, 0x79, 0x56, 0x28, 0x6f, 0x42, 0x91, 0xe7, 0x18, 0x13, 0xde, 0x71, 0xf9, 0x06,
- 0x14, 0xdc, 0x2a, 0x7c, 0x38, 0xcf, 0x42, 0xf2, 0x83, 0x86, 0xd1, 0x54, 0xce, 0xd1, 0x65, 0x6d,
- 0xee, 0x34, 0x14, 0x89, 0xfe, 0xd8, 0x7f, 0xbf, 0x19, 0x58, 0xca, 0x4b, 0x50, 0xf0, 0xc6, 0xbe,
- 0x67, 0x3a, 0xd8, 0x42, 0x03, 0x42, 0xa6, 0x2e, 0x67, 0xa5, 0x72, 0x06, 0x52, 0x8d, 0xe1, 0xc8,
- 0x39, 0x29, 0xff, 0x22, 0xe4, 0x39, 0xe8, 0x69, 0x7f, 0xe2, 0x90, 0x3b, 0x90, 0x19, 0xf2, 0xf9,
- 0x4a, 0x78, 0xdd, 0x13, 0x35, 0xe5, 0xe3, 0xdc, 0xdf, 0x86, 0x8b, 0x5e, 0xaa, 0x42, 0x46, 0xf0,
- 0xa5, 0xfc, 0xa8, 0xcb, 0xe2, 0x51, 0x67, 0x4e, 0x21, 0x21, 0x38, 0x85, 0xf2, 0x36, 0x64, 0x58,
- 0x04, 0x9c, 0x60, 0x54, 0x67, 0xa9, 0x22, 0x13, 0x13, 0xdb, 0xf9, 0x3c, 0xab, 0x63, 0x17, 0x95,
- 0xab, 0x90, 0x47, 0xc1, 0x72, 0x04, 0x73, 0x9d, 0x80, 0x55, 0x4c, 0x6e, 0xbf, 0x9f, 0x82, 0xac,
- 0xbb, 0x52, 0x64, 0x19, 0xd2, 0x2c, 0x3f, 0x43, 0x53, 0xee, 0xfb, 0x41, 0x0a, 0x33, 0x32, 0xb2,
- 0x0c, 0x19, 0x9e, 0x83, 0x71, 0xef, 0x2e, 0x57, 0x35, 0x23, 0xcd, 0x72, 0x2e, 0xaf, 0xb1, 0xa6,
- 0xa3, 0x63, 0x62, 0x2f, 0x03, 0x69, 0x96, 0x55, 0x11, 0x15, 0x72, 0x5e, 0x1e, 0x85, 0xfe, 0x98,
- 0x3f, 0x03, 0x64, 0xdd, 0xc4, 0x49, 0x40, 0xd4, 0x74, 0xf4, 0x58, 0x3c, 0xe7, 0xcf, 0x76, 0xfd,
- 0xeb, 0x49, 0xd6, 0xcd, 0x86, 0xf0, 0xf9, 0xde, 0x4d, 0xf0, 0x33, 0x3c, 0xff, 0xf1, 0x01, 0x35,
- 0x1d, 0x5d, 0x82, 0x9b, 0xcd, 0x67, 0x78, 0x8e, 0x43, 0xae, 0xd2, 0x21, 0x62, 0xce, 0x82, 0x47,
- 0xdf, 0x4f, 0xdd, 0xd3, 0x2c, 0x93, 0x21, 0xd7, 0xa8, 0x05, 0x96, 0x98, 0xe0, 0xb9, 0xf4, 0xf3,
- 0xf4, 0x0c, 0xcf, 0x57, 0xc8, 0x4d, 0x0a, 0x61, 0xcb, 0x5f, 0x82, 0x88, 0xa4, 0x3c, 0xc3, 0x93,
- 0x72, 0xa2, 0xd2, 0x0e, 0xd1, 0x3d, 0xa0, 0x4b, 0x10, 0x12, 0xf0, 0x34, 0x4b, 0xc0, 0xc9, 0x15,
- 0x34, 0xc7, 0x26, 0x55, 0xf0, 0x93, 0xed, 0x0c, 0x4f, 0x70, 0xfc, 0x76, 0xbc, 0xb2, 0x79, 0x89,
- 0x75, 0x86, 0xa7, 0x30, 0xa4, 0x46, 0xf7, 0x8b, 0xea, 0xbb, 0x34, 0x8f, 0x4e, 0xb0, 0xe4, 0x0b,
- 0xcf, 0xdd, 0x53, 0xe6, 0x03, 0xeb, 0xcc, 0x83, 0x18, 0xa9, 0x2e, 0x9e, 0x86, 0x25, 0xca, 0xdb,
- 0xed, 0x5b, 0xdd, 0x52, 0x11, 0x57, 0x22, 0xd1, 0xb7, 0xba, 0x46, 0xaa, 0x4b, 0x6b, 0x98, 0x06,
- 0x76, 0x68, 0x9b, 0x82, 0x6d, 0xc9, 0xdb, 0xac, 0x91, 0x56, 0x91, 0x12, 0xa4, 0x36, 0x5a, 0x3b,
- 0x6d, 0xab, 0xb4, 0xc0, 0x78, 0x56, 0xdb, 0x32, 0x92, 0xdd, 0x9d, 0xb6, 0x45, 0xde, 0x82, 0xc4,
- 0xe4, 0xe8, 0xa0, 0x44, 0xc2, 0x5f, 0x56, 0xf6, 0x8e, 0x0e, 0xdc, 0xa1, 0x18, 0x14, 0x41, 0x96,
- 0x21, 0x3b, 0x71, 0xc6, 0xad, 0x5f, 0x30, 0xc7, 0x76, 0xe9, 0x3c, 0x2e, 0xe1, 0x39, 0x23, 0x33,
- 0x71, 0xc6, 0x1f, 0x98, 0x63, 0xfb, 0x8c, 0xce, 0xaf, 0x7c, 0x05, 0xf2, 0x82, 0x5d, 0x52, 0x04,
- 0xc9, 0x62, 0x37, 0x85, 0xba, 0x74, 0xc7, 0x90, 0xac, 0xf2, 0x3e, 0x14, 0xdc, 0x1c, 0x06, 0xe7,
- 0xab, 0xd1, 0x93, 0x34, 0xb0, 0xc7, 0x78, 0x3e, 0xe7, 0xb5, 0x4b, 0x62, 0x88, 0xf2, 0x61, 0x3c,
- 0x5c, 0x30, 0x68, 0x59, 0x09, 0x0d, 0x45, 0x2a, 0xff, 0x50, 0x82, 0xc2, 0xb6, 0x3d, 0xf6, 0x1f,
- 0x98, 0x17, 0x21, 0x75, 0x60, 0xdb, 0x83, 0x09, 0x9a, 0xcd, 0x1a, 0xac, 0x40, 0xde, 0x80, 0x02,
- 0xfe, 0x70, 0x73, 0x4f, 0xd9, 0x7b, 0xda, 0xc8, 0x63, 0x3d, 0x4f, 0x38, 0x09, 0x24, 0xfb, 0x96,
- 0x33, 0xe1, 0x9e, 0x0c, 0x7f, 0x93, 0x2f, 0x40, 0x9e, 0xfe, 0x75, 0x99, 0x49, 0xef, 0xc2, 0x0a,
- 0xb4, 0x9a, 0x13, 0xdf, 0x82, 0x39, 0xdc, 0x7d, 0x0f, 0x96, 0xf1, 0x9e, 0x31, 0x0a, 0xac, 0x81,
- 0x03, 0x4b, 0x90, 0x61, 0xae, 0x60, 0x82, 0x5f, 0xcb, 0x72, 0x86, 0x5b, 0xa4, 0xee, 0x15, 0x33,
- 0x01, 0x16, 0xee, 0x33, 0x06, 0x2f, 0x95, 0x1f, 0x40, 0x16, 0xa3, 0x54, 0x73, 0xd0, 0x21, 0x65,
- 0x90, 0x7a, 0x25, 0x13, 0x63, 0xe4, 0xa2, 0x70, 0xcd, 0xe7, 0xcd, 0x2b, 0x9b, 0x86, 0xd4, 0x5b,
- 0x5a, 0x00, 0x69, 0x93, 0xde, 0xbb, 0x8f, 0xb9, 0x9b, 0x96, 0x8e, 0xcb, 0x4d, 0x6e, 0x62, 0xc7,
- 0x7c, 0x19, 0x67, 0x62, 0xc7, 0x7c, 0xc9, 0x4c, 0x5c, 0x9d, 0x32, 0x41, 0x4b, 0x27, 0xfc, 0xd3,
- 0xa1, 0x74, 0x52, 0xae, 0xc2, 0x1c, 0x1e, 0xcf, 0xbe, 0xd5, 0xdb, 0xb5, 0xfb, 0x16, 0xde, 0xf3,
- 0xbb, 0x78, 0x4f, 0x92, 0x0c, 0xa9, 0x4b, 0xf7, 0xc0, 0x3c, 0x6e, 0x1f, 0xb2, 0x1b, 0x67, 0xd6,
- 0x60, 0x85, 0xf2, 0x67, 0x49, 0x98, 0xe7, 0xae, 0xf5, 0xfd, 0xbe, 0xf3, 0x6c, 0xbb, 0x3d, 0x22,
- 0x4f, 0xa1, 0x40, 0xbd, 0x6a, 0x6b, 0xd8, 0x1e, 0x8d, 0xe8, 0xf1, 0x95, 0xf0, 0xaa, 0x71, 0x7d,
- 0xca, 0x55, 0x73, 0xfc, 0xca, 0x4e, 0x7b, 0x68, 0x6e, 0x33, 0x6c, 0xc3, 0x72, 0xc6, 0x27, 0x46,
- 0xde, 0xf2, 0x6b, 0xc8, 0x16, 0xe4, 0x87, 0x93, 0x9e, 0x67, 0x4c, 0x46, 0x63, 0x95, 0x48, 0x63,
- 0xdb, 0x93, 0x5e, 0xc0, 0x16, 0x0c, 0xbd, 0x0a, 0x3a, 0x30, 0xea, 0x8f, 0x3d, 0x5b, 0x89, 0x53,
- 0x06, 0x46, 0x5d, 0x47, 0x70, 0x60, 0x07, 0x7e, 0x0d, 0x79, 0x0c, 0x40, 0x8f, 0x97, 0x63, 0xd3,
- 0xd4, 0x09, 0x15, 0x94, 0xd7, 0xde, 0x8c, 0xb4, 0xb5, 0xe7, 0x8c, 0xf7, 0xed, 0x3d, 0x67, 0xcc,
- 0x0c, 0xd1, 0x83, 0x89, 0xc5, 0xa5, 0x77, 0x40, 0x09, 0xcf, 0x5f, 0xbc, 0x91, 0xa7, 0x66, 0xdc,
- 0xc8, 0x73, 0xfc, 0x46, 0x5e, 0x97, 0xef, 0x4a, 0x4b, 0xef, 0x41, 0x31, 0x34, 0x65, 0x91, 0x4e,
- 0x18, 0xfd, 0xb6, 0x48, 0xcf, 0x6b, 0xaf, 0x0b, 0x9f, 0xb3, 0xc5, 0x0d, 0x17, 0xed, 0xbe, 0x03,
- 0x4a, 0x78, 0xfa, 0xa2, 0xe1, 0x6c, 0x4c, 0xa6, 0x80, 0xfc, 0xfb, 0x30, 0x17, 0x98, 0xb2, 0x48,
- 0xce, 0x9d, 0x32, 0xa9, 0xf2, 0x2f, 0xa5, 0x20, 0xd5, 0xb4, 0x4c, 0xbb, 0x4b, 0x5e, 0x0f, 0xc6,
- 0xc9, 0x27, 0xe7, 0xdc, 0x18, 0x79, 0x31, 0x14, 0x23, 0x9f, 0x9c, 0xf3, 0x22, 0xe4, 0xc5, 0x50,
- 0x84, 0x74, 0x9b, 0x6a, 0x3a, 0xb9, 0x3c, 0x15, 0x1f, 0x9f, 0x9c, 0x13, 0x82, 0xe3, 0xe5, 0xa9,
- 0xe0, 0xe8, 0x37, 0xd7, 0x74, 0xea, 0x50, 0x83, 0x91, 0xf1, 0xc9, 0x39, 0x3f, 0x2a, 0x2e, 0x87,
- 0xa3, 0xa2, 0xd7, 0x58, 0xd3, 0xd9, 0x90, 0x84, 0x88, 0x88, 0x43, 0x62, 0xb1, 0x70, 0x39, 0x1c,
- 0x0b, 0x91, 0xc7, 0xa3, 0xe0, 0x72, 0x38, 0x0a, 0x62, 0x23, 0x8f, 0x7a, 0x17, 0x43, 0x51, 0x0f,
- 0x8d, 0xb2, 0x70, 0xb7, 0x1c, 0x0e, 0x77, 0x8c, 0x27, 0x8c, 0x54, 0x8c, 0x75, 0x5e, 0x63, 0x4d,
- 0x27, 0x5a, 0x28, 0xd0, 0x45, 0xdf, 0xf6, 0x71, 0x2f, 0xd0, 0xe9, 0xeb, 0x74, 0xd9, 0xdc, 0x8b,
- 0x68, 0x31, 0xe6, 0x8b, 0x3f, 0xae, 0xa6, 0x7b, 0x11, 0xd3, 0x20, 0xd3, 0xe5, 0x09, 0xb0, 0x82,
- 0x9e, 0x4b, 0x90, 0x25, 0x6e, 0xfe, 0xca, 0x46, 0x0b, 0x3d, 0x18, 0xce, 0x8b, 0xdd, 0xe9, 0x2b,
- 0x30, 0xb7, 0xd1, 0x7a, 0xda, 0x1e, 0xf7, 0xcc, 0x89, 0xd3, 0xda, 0x6f, 0xf7, 0xbc, 0x47, 0x04,
- 0xba, 0xff, 0xf9, 0x2e, 0x6f, 0xd9, 0x6f, 0xf7, 0xc8, 0x05, 0x57, 0x5c, 0x1d, 0x6c, 0x95, 0xb8,
- 0xbc, 0x96, 0x5e, 0xa7, 0x8b, 0xc6, 0x8c, 0xa1, 0x2f, 0x5c, 0xe0, 0xbe, 0xf0, 0x61, 0x06, 0x52,
- 0x47, 0x56, 0xdf, 0xb6, 0x1e, 0xe6, 0x20, 0xe3, 0xd8, 0xe3, 0x61, 0xdb, 0xb1, 0xcb, 0x3f, 0x92,
- 0x00, 0x1e, 0xd9, 0xc3, 0xe1, 0x91, 0xd5, 0x7f, 0x71, 0x64, 0x92, 0x2b, 0x90, 0x1f, 0xb6, 0x9f,
- 0x9b, 0xad, 0xa1, 0xd9, 0x3a, 0x1c, 0xbb, 0xe7, 0x20, 0x47, 0xab, 0xb6, 0xcd, 0x47, 0xe3, 0x13,
- 0x52, 0x72, 0xaf, 0xe8, 0xa8, 0x1d, 0x94, 0x24, 0xbf, 0xb2, 0x2f, 0xf2, 0x4b, 0x67, 0x9a, 0xef,
- 0xa1, 0x7b, 0xed, 0x64, 0x79, 0x44, 0x86, 0xef, 0x1e, 0x96, 0xa8, 0xe4, 0x1d, 0x73, 0x38, 0x6a,
- 0x1d, 0xa2, 0x54, 0xa8, 0x1c, 0x52, 0xb4, 0xfc, 0x88, 0xdc, 0x86, 0xc4, 0xa1, 0x3d, 0x40, 0x91,
- 0x9c, 0xb2, 0x2f, 0x14, 0x47, 0xde, 0x80, 0xc4, 0x70, 0xc2, 0x64, 0x93, 0xd7, 0x16, 0x84, 0x7b,
- 0x02, 0x0b, 0x4d, 0x14, 0x36, 0x9c, 0xf4, 0xbc, 0x79, 0xdf, 0x28, 0x42, 0x62, 0xa3, 0xd9, 0xa4,
- 0xb1, 0x7f, 0xa3, 0xd9, 0x5c, 0x53, 0xa4, 0xfa, 0x97, 0x20, 0xdb, 0x1b, 0x9b, 0x26, 0x75, 0x0f,
- 0xb3, 0x73, 0x8e, 0x0f, 0x31, 0xd6, 0x79, 0xa0, 0xfa, 0x36, 0x64, 0x0e, 0x59, 0xd6, 0x41, 0x22,
- 0xd2, 0xda, 0xd2, 0x1f, 0xb2, 0x47, 0x95, 0x25, 0xbf, 0x39, 0x9c, 0xa7, 0x18, 0xae, 0x8d, 0xfa,
- 0x2e, 0xe4, 0xc6, 0xad, 0xd3, 0x0c, 0x7e, 0xcc, 0xa2, 0x4b, 0x9c, 0xc1, 0xec, 0x98, 0x57, 0xd5,
- 0x1b, 0xb0, 0x60, 0xd9, 0xee, 0x37, 0x94, 0x56, 0x87, 0x9d, 0xb1, 0x8b, 0xd3, 0x57, 0x39, 0xd7,
- 0xb8, 0xc9, 0xbe, 0x5b, 0x5a, 0x36, 0x6f, 0x60, 0xa7, 0xb2, 0xfe, 0x08, 0x14, 0xc1, 0x0c, 0xa6,
- 0x9e, 0x71, 0x56, 0xba, 0xec, 0x43, 0xa9, 0x67, 0x05, 0xcf, 0x7d, 0xc8, 0x08, 0x3b, 0x99, 0x31,
- 0x46, 0x7a, 0xec, 0xab, 0xb3, 0x67, 0x04, 0x5d, 0xdd, 0xb4, 0x11, 0xea, 0x6b, 0xa2, 0x8d, 0x3c,
- 0x63, 0x1f, 0xa4, 0x45, 0x23, 0x35, 0x3d, 0xb4, 0x2a, 0x47, 0xa7, 0x0e, 0xa5, 0xcf, 0xbe, 0x27,
- 0x7b, 0x56, 0x98, 0x03, 0x9c, 0x61, 0x26, 0x7e, 0x30, 0x1f, 0xb2, 0x4f, 0xcd, 0x01, 0x33, 0x53,
- 0xa3, 0x99, 0x9c, 0x3a, 0x9a, 0xe7, 0xec, 0xbb, 0xae, 0x67, 0x66, 0x6f, 0xd6, 0x68, 0x26, 0xa7,
- 0x8e, 0x66, 0xc0, 0xbe, 0xf8, 0x06, 0xcc, 0xd4, 0xf4, 0xfa, 0x26, 0x10, 0x71, 0xab, 0x79, 0x9c,
- 0x88, 0xb1, 0x33, 0x64, 0xdf, 0xf1, 0xfd, 0xcd, 0x66, 0x94, 0x59, 0x86, 0xe2, 0x07, 0x64, 0xb1,
- 0x4f, 0xfc, 0x41, 0x43, 0x35, 0xbd, 0xbe, 0x05, 0xe7, 0xc5, 0x89, 0x9d, 0x61, 0x48, 0xb6, 0x2a,
- 0x55, 0x8a, 0xc6, 0x82, 0x3f, 0x35, 0xce, 0x99, 0x69, 0x2a, 0x7e, 0x50, 0x23, 0x55, 0xaa, 0x28,
- 0x53, 0xa6, 0x6a, 0x7a, 0xfd, 0x01, 0x14, 0x05, 0x53, 0x07, 0x18, 0xa1, 0xa3, 0xcd, 0xbc, 0x60,
- 0xff, 0x6b, 0xe1, 0x99, 0xa1, 0x11, 0x3d, 0xbc, 0x63, 0x3c, 0xc6, 0x45, 0x1b, 0x19, 0xb3, 0x7f,
- 0x14, 0xf0, 0xc7, 0x82, 0x8c, 0xd0, 0x91, 0xc0, 0xfc, 0x3b, 0xce, 0xca, 0x84, 0xfd, 0x0b, 0x81,
- 0x3f, 0x14, 0x4a, 0xa8, 0xf7, 0x03, 0xd3, 0x31, 0x69, 0x90, 0x8b, 0xb1, 0xe1, 0xa0, 0x47, 0x7e,
- 0x33, 0x12, 0xb0, 0x22, 0x3e, 0x90, 0x08, 0xd3, 0xa6, 0xc5, 0xfa, 0x16, 0xcc, 0x9f, 0xdd, 0x21,
- 0x7d, 0x2c, 0xb1, 0x6c, 0xb9, 0xba, 0x42, 0x13, 0x6a, 0x63, 0xae, 0x13, 0xf0, 0x4b, 0x0d, 0x98,
- 0x3b, 0xb3, 0x53, 0xfa, 0x44, 0x62, 0x39, 0x27, 0xb5, 0x64, 0x14, 0x3a, 0x41, 0xcf, 0x34, 0x77,
- 0x66, 0xb7, 0xf4, 0xa9, 0xc4, 0x1e, 0x28, 0x74, 0xcd, 0x33, 0xe2, 0x7a, 0xa6, 0xb9, 0x33, 0xbb,
- 0xa5, 0xaf, 0xb2, 0x8c, 0x52, 0xd6, 0xab, 0xa2, 0x11, 0xf4, 0x05, 0xf3, 0x67, 0x77, 0x4b, 0x5f,
- 0x93, 0xf0, 0xb1, 0x42, 0xd6, 0x75, 0x6f, 0x5d, 0x3c, 0xcf, 0x34, 0x7f, 0x76, 0xb7, 0xf4, 0x75,
- 0x09, 0x9f, 0x34, 0x64, 0x7d, 0x3d, 0x60, 0x26, 0x38, 0x9a, 0xd3, 0xdd, 0xd2, 0x37, 0x24, 0x7c,
- 0x65, 0x90, 0xf5, 0x9a, 0x67, 0x66, 0x6f, 0x6a, 0x34, 0xa7, 0xbb, 0xa5, 0x6f, 0xe2, 0x2d, 0xbe,
- 0x2e, 0xeb, 0x77, 0x02, 0x66, 0xd0, 0x33, 0x15, 0x5f, 0xc1, 0x2d, 0x7d, 0x4b, 0xc2, 0xc7, 0x20,
- 0x59, 0xbf, 0x6b, 0xb8, 0xbd, 0xfb, 0x9e, 0xa9, 0xf8, 0x0a, 0x6e, 0xe9, 0x33, 0x09, 0xdf, 0x8c,
- 0x64, 0xfd, 0x5e, 0xd0, 0x10, 0x7a, 0x26, 0xe5, 0x55, 0xdc, 0xd2, 0xb7, 0xa9, 0xa5, 0x62, 0x5d,
- 0x5e, 0x5f, 0x35, 0xdc, 0x01, 0x08, 0x9e, 0x49, 0x79, 0x15, 0xb7, 0xf4, 0x1d, 0x6a, 0x4a, 0xa9,
- 0xcb, 0xeb, 0x6b, 0x21, 0x53, 0x35, 0xbd, 0xfe, 0x08, 0x0a, 0x67, 0x75, 0x4b, 0xdf, 0x15, 0xdf,
- 0xe2, 0xf2, 0x1d, 0xc1, 0x37, 0xed, 0x0a, 0x7b, 0x76, 0xaa, 0x63, 0xfa, 0x1e, 0xe6, 0x38, 0xf5,
- 0xb9, 0x27, 0xec, 0xbd, 0x8a, 0x11, 0xfc, 0xed, 0x63, 0x6e, 0x6a, 0xdb, 0x3f, 0x1f, 0xa7, 0xfa,
- 0xa8, 0xef, 0x4b, 0xf8, 0xa8, 0x55, 0xe0, 0x06, 0x11, 0xef, 0x9d, 0x14, 0xe6, 0xb0, 0x3e, 0xf4,
- 0x67, 0x79, 0x9a, 0xb7, 0xfa, 0x81, 0xf4, 0x2a, 0xee, 0xaa, 0x9e, 0x68, 0xee, 0x34, 0xbc, 0xc5,
- 0xc0, 0x9a, 0xb7, 0x21, 0x79, 0xac, 0xad, 0xae, 0x89, 0x57, 0x32, 0xf1, 0x2d, 0x97, 0x39, 0xa9,
- 0xbc, 0x56, 0x14, 0x9e, 0xbb, 0x87, 0x23, 0xe7, 0xc4, 0x40, 0x16, 0x67, 0x6b, 0x91, 0xec, 0x4f,
- 0x62, 0xd8, 0x1a, 0x67, 0x57, 0x23, 0xd9, 0x9f, 0xc6, 0xb0, 0xab, 0x9c, 0xad, 0x47, 0xb2, 0xbf,
- 0x1a, 0xc3, 0xd6, 0x39, 0x7b, 0x3d, 0x92, 0xfd, 0xb5, 0x18, 0xf6, 0x3a, 0x67, 0xd7, 0x22, 0xd9,
- 0x5f, 0x8f, 0x61, 0xd7, 0x38, 0xfb, 0x4e, 0x24, 0xfb, 0x1b, 0x31, 0xec, 0x3b, 0x9c, 0x7d, 0x37,
- 0x92, 0xfd, 0xcd, 0x18, 0xf6, 0x5d, 0xce, 0xbe, 0x17, 0xc9, 0xfe, 0x56, 0x0c, 0xfb, 0x1e, 0x63,
- 0xaf, 0xad, 0x46, 0xb2, 0x3f, 0x8b, 0x66, 0xaf, 0xad, 0x72, 0x76, 0xb4, 0xd6, 0xbe, 0x1d, 0xc3,
- 0xe6, 0x5a, 0x5b, 0x8b, 0xd6, 0xda, 0x77, 0x62, 0xd8, 0x5c, 0x6b, 0x6b, 0xd1, 0x5a, 0xfb, 0x6e,
- 0x0c, 0x9b, 0x6b, 0x6d, 0x2d, 0x5a, 0x6b, 0xdf, 0x8b, 0x61, 0x73, 0xad, 0xad, 0x45, 0x6b, 0xed,
- 0xfb, 0x31, 0x6c, 0xae, 0xb5, 0xb5, 0x68, 0xad, 0xfd, 0x20, 0x86, 0xcd, 0xb5, 0xb6, 0x16, 0xad,
- 0xb5, 0x3f, 0x8a, 0x61, 0x73, 0xad, 0xad, 0x45, 0x6b, 0xed, 0x8f, 0x63, 0xd8, 0x5c, 0x6b, 0x6b,
- 0xd1, 0x5a, 0xfb, 0x93, 0x18, 0x36, 0xd7, 0x9a, 0x16, 0xad, 0xb5, 0x3f, 0x8d, 0x66, 0x6b, 0x5c,
- 0x6b, 0x5a, 0xb4, 0xd6, 0xfe, 0x2c, 0x86, 0xcd, 0xb5, 0xa6, 0x45, 0x6b, 0xed, 0xcf, 0x63, 0xd8,
- 0x5c, 0x6b, 0x5a, 0xb4, 0xd6, 0x7e, 0x18, 0xc3, 0xe6, 0x5a, 0xd3, 0xa2, 0xb5, 0xf6, 0x17, 0x31,
- 0x6c, 0xae, 0x35, 0x2d, 0x5a, 0x6b, 0x7f, 0x19, 0xc3, 0xe6, 0x5a, 0xd3, 0xa2, 0xb5, 0xf6, 0x57,
- 0x31, 0x6c, 0xae, 0x35, 0x2d, 0x5a, 0x6b, 0x7f, 0x1d, 0xc3, 0xe6, 0x5a, 0xd3, 0xa2, 0xb5, 0xf6,
- 0x37, 0x31, 0x6c, 0xae, 0x35, 0x2d, 0x5a, 0x6b, 0x7f, 0x1b, 0xc3, 0xe6, 0x5a, 0xab, 0x46, 0x6b,
- 0xed, 0xef, 0xa2, 0xd9, 0x55, 0xae, 0xb5, 0x6a, 0xb4, 0xd6, 0xfe, 0x3e, 0x86, 0xcd, 0xb5, 0x56,
- 0x8d, 0xd6, 0xda, 0x3f, 0xc4, 0xb0, 0xb9, 0xd6, 0xaa, 0xd1, 0x5a, 0xfb, 0xc7, 0x18, 0x36, 0xd7,
- 0x5a, 0x35, 0x5a, 0x6b, 0x3f, 0x8a, 0x61, 0x73, 0xad, 0x55, 0xa3, 0xb5, 0xf6, 0x4f, 0x31, 0x6c,
- 0xae, 0xb5, 0x6a, 0xb4, 0xd6, 0xfe, 0x39, 0x86, 0xcd, 0xb5, 0x56, 0x8d, 0xd6, 0xda, 0xbf, 0xc4,
- 0xb0, 0xb9, 0xd6, 0xaa, 0xd1, 0x5a, 0xfb, 0xd7, 0x18, 0x36, 0xd7, 0x5a, 0x35, 0x5a, 0x6b, 0xff,
- 0x16, 0xc3, 0xe6, 0x5a, 0xd3, 0xa3, 0xb5, 0xf6, 0xef, 0xd1, 0x6c, 0x9d, 0x6b, 0x4d, 0x8f, 0xd6,
- 0xda, 0x7f, 0xc4, 0xb0, 0xb9, 0xd6, 0xf4, 0x68, 0xad, 0xfd, 0x67, 0x0c, 0x9b, 0x6b, 0x4d, 0x8f,
- 0xd6, 0xda, 0x7f, 0xc5, 0xb0, 0xb9, 0xd6, 0xf4, 0x68, 0xad, 0xfd, 0x77, 0x0c, 0x9b, 0x6b, 0x4d,
- 0x8f, 0xd6, 0xda, 0xff, 0xc4, 0xb0, 0xb9, 0xd6, 0xf4, 0x68, 0xad, 0xfd, 0x38, 0x86, 0xcd, 0xb5,
- 0xa6, 0x47, 0x6b, 0xed, 0x27, 0x31, 0x6c, 0xae, 0x35, 0x3d, 0x5a, 0x6b, 0xff, 0x1b, 0xc3, 0xe6,
- 0x5a, 0xd3, 0xa3, 0xb5, 0xf6, 0x7f, 0x31, 0x6c, 0xae, 0xb5, 0xf5, 0x68, 0xad, 0xfd, 0x7f, 0x34,
- 0x7b, 0x7d, 0xf5, 0xa7, 0x01, 0x00, 0x00, 0xff, 0xff, 0x40, 0x32, 0xb7, 0xac, 0x57, 0x39, 0x00,
- 0x00,
-}
diff --git a/vendor/src/github.com/golang/protobuf/proto/testdata/test.proto b/vendor/src/github.com/golang/protobuf/proto/testdata/test.proto
deleted file mode 100644
index 70e3cfc..0000000
--- a/vendor/src/github.com/golang/protobuf/proto/testdata/test.proto
+++ /dev/null
@@ -1,548 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// A feature-rich test file for the protocol compiler and libraries.
-
-syntax = "proto2";
-
-package testdata;
-
-enum FOO { FOO1 = 1; };
-
-message GoEnum {
- required FOO foo = 1;
-}
-
-message GoTestField {
- required string Label = 1;
- required string Type = 2;
-}
-
-message GoTest {
- // An enum, for completeness.
- enum KIND {
- VOID = 0;
-
- // Basic types
- BOOL = 1;
- BYTES = 2;
- FINGERPRINT = 3;
- FLOAT = 4;
- INT = 5;
- STRING = 6;
- TIME = 7;
-
- // Groupings
- TUPLE = 8;
- ARRAY = 9;
- MAP = 10;
-
- // Table types
- TABLE = 11;
-
- // Functions
- FUNCTION = 12; // last tag
- };
-
- // Some typical parameters
- required KIND Kind = 1;
- optional string Table = 2;
- optional int32 Param = 3;
-
- // Required, repeated and optional foreign fields.
- required GoTestField RequiredField = 4;
- repeated GoTestField RepeatedField = 5;
- optional GoTestField OptionalField = 6;
-
- // Required fields of all basic types
- required bool F_Bool_required = 10;
- required int32 F_Int32_required = 11;
- required int64 F_Int64_required = 12;
- required fixed32 F_Fixed32_required = 13;
- required fixed64 F_Fixed64_required = 14;
- required uint32 F_Uint32_required = 15;
- required uint64 F_Uint64_required = 16;
- required float F_Float_required = 17;
- required double F_Double_required = 18;
- required string F_String_required = 19;
- required bytes F_Bytes_required = 101;
- required sint32 F_Sint32_required = 102;
- required sint64 F_Sint64_required = 103;
-
- // Repeated fields of all basic types
- repeated bool F_Bool_repeated = 20;
- repeated int32 F_Int32_repeated = 21;
- repeated int64 F_Int64_repeated = 22;
- repeated fixed32 F_Fixed32_repeated = 23;
- repeated fixed64 F_Fixed64_repeated = 24;
- repeated uint32 F_Uint32_repeated = 25;
- repeated uint64 F_Uint64_repeated = 26;
- repeated float F_Float_repeated = 27;
- repeated double F_Double_repeated = 28;
- repeated string F_String_repeated = 29;
- repeated bytes F_Bytes_repeated = 201;
- repeated sint32 F_Sint32_repeated = 202;
- repeated sint64 F_Sint64_repeated = 203;
-
- // Optional fields of all basic types
- optional bool F_Bool_optional = 30;
- optional int32 F_Int32_optional = 31;
- optional int64 F_Int64_optional = 32;
- optional fixed32 F_Fixed32_optional = 33;
- optional fixed64 F_Fixed64_optional = 34;
- optional uint32 F_Uint32_optional = 35;
- optional uint64 F_Uint64_optional = 36;
- optional float F_Float_optional = 37;
- optional double F_Double_optional = 38;
- optional string F_String_optional = 39;
- optional bytes F_Bytes_optional = 301;
- optional sint32 F_Sint32_optional = 302;
- optional sint64 F_Sint64_optional = 303;
-
- // Default-valued fields of all basic types
- optional bool F_Bool_defaulted = 40 [default=true];
- optional int32 F_Int32_defaulted = 41 [default=32];
- optional int64 F_Int64_defaulted = 42 [default=64];
- optional fixed32 F_Fixed32_defaulted = 43 [default=320];
- optional fixed64 F_Fixed64_defaulted = 44 [default=640];
- optional uint32 F_Uint32_defaulted = 45 [default=3200];
- optional uint64 F_Uint64_defaulted = 46 [default=6400];
- optional float F_Float_defaulted = 47 [default=314159.];
- optional double F_Double_defaulted = 48 [default=271828.];
- optional string F_String_defaulted = 49 [default="hello, \"world!\"\n"];
- optional bytes F_Bytes_defaulted = 401 [default="Bignose"];
- optional sint32 F_Sint32_defaulted = 402 [default = -32];
- optional sint64 F_Sint64_defaulted = 403 [default = -64];
-
- // Packed repeated fields (no string or bytes).
- repeated bool F_Bool_repeated_packed = 50 [packed=true];
- repeated int32 F_Int32_repeated_packed = 51 [packed=true];
- repeated int64 F_Int64_repeated_packed = 52 [packed=true];
- repeated fixed32 F_Fixed32_repeated_packed = 53 [packed=true];
- repeated fixed64 F_Fixed64_repeated_packed = 54 [packed=true];
- repeated uint32 F_Uint32_repeated_packed = 55 [packed=true];
- repeated uint64 F_Uint64_repeated_packed = 56 [packed=true];
- repeated float F_Float_repeated_packed = 57 [packed=true];
- repeated double F_Double_repeated_packed = 58 [packed=true];
- repeated sint32 F_Sint32_repeated_packed = 502 [packed=true];
- repeated sint64 F_Sint64_repeated_packed = 503 [packed=true];
-
- // Required, repeated, and optional groups.
- required group RequiredGroup = 70 {
- required string RequiredField = 71;
- };
-
- repeated group RepeatedGroup = 80 {
- required string RequiredField = 81;
- };
-
- optional group OptionalGroup = 90 {
- required string RequiredField = 91;
- };
-}
-
-// For testing a group containing a required field.
-message GoTestRequiredGroupField {
- required group Group = 1 {
- required int32 Field = 2;
- };
-}
-
-// For testing skipping of unrecognized fields.
-// Numbers are all big, larger than tag numbers in GoTestField,
-// the message used in the corresponding test.
-message GoSkipTest {
- required int32 skip_int32 = 11;
- required fixed32 skip_fixed32 = 12;
- required fixed64 skip_fixed64 = 13;
- required string skip_string = 14;
- required group SkipGroup = 15 {
- required int32 group_int32 = 16;
- required string group_string = 17;
- }
-}
-
-// For testing packed/non-packed decoder switching.
-// A serialized instance of one should be deserializable as the other.
-message NonPackedTest {
- repeated int32 a = 1;
-}
-
-message PackedTest {
- repeated int32 b = 1 [packed=true];
-}
-
-message MaxTag {
- // Maximum possible tag number.
- optional string last_field = 536870911;
-}
-
-message OldMessage {
- message Nested {
- optional string name = 1;
- }
- optional Nested nested = 1;
-
- optional int32 num = 2;
-}
-
-// NewMessage is wire compatible with OldMessage;
-// imagine it as a future version.
-message NewMessage {
- message Nested {
- optional string name = 1;
- optional string food_group = 2;
- }
- optional Nested nested = 1;
-
- // This is an int32 in OldMessage.
- optional int64 num = 2;
-}
-
-// Smaller tests for ASCII formatting.
-
-message InnerMessage {
- required string host = 1;
- optional int32 port = 2 [default=4000];
- optional bool connected = 3;
-}
-
-message OtherMessage {
- optional int64 key = 1;
- optional bytes value = 2;
- optional float weight = 3;
- optional InnerMessage inner = 4;
-
- extensions 100 to max;
-}
-
-message RequiredInnerMessage {
- required InnerMessage leo_finally_won_an_oscar = 1;
-}
-
-message MyMessage {
- required int32 count = 1;
- optional string name = 2;
- optional string quote = 3;
- repeated string pet = 4;
- optional InnerMessage inner = 5;
- repeated OtherMessage others = 6;
- optional RequiredInnerMessage we_must_go_deeper = 13;
- repeated InnerMessage rep_inner = 12;
-
- enum Color {
- RED = 0;
- GREEN = 1;
- BLUE = 2;
- };
- optional Color bikeshed = 7;
-
- optional group SomeGroup = 8 {
- optional int32 group_field = 9;
- }
-
- // This field becomes [][]byte in the generated code.
- repeated bytes rep_bytes = 10;
-
- optional double bigfloat = 11;
-
- extensions 100 to max;
-}
-
-message Ext {
- extend MyMessage {
- optional Ext more = 103;
- optional string text = 104;
- optional int32 number = 105;
- }
-
- optional string data = 1;
-}
-
-extend MyMessage {
- repeated string greeting = 106;
-}
-
-message ComplexExtension {
- optional int32 first = 1;
- optional int32 second = 2;
- repeated int32 third = 3;
-}
-
-extend OtherMessage {
- optional ComplexExtension complex = 200;
- repeated ComplexExtension r_complex = 201;
-}
-
-message DefaultsMessage {
- enum DefaultsEnum {
- ZERO = 0;
- ONE = 1;
- TWO = 2;
- };
- extensions 100 to max;
-}
-
-extend DefaultsMessage {
- optional double no_default_double = 101;
- optional float no_default_float = 102;
- optional int32 no_default_int32 = 103;
- optional int64 no_default_int64 = 104;
- optional uint32 no_default_uint32 = 105;
- optional uint64 no_default_uint64 = 106;
- optional sint32 no_default_sint32 = 107;
- optional sint64 no_default_sint64 = 108;
- optional fixed32 no_default_fixed32 = 109;
- optional fixed64 no_default_fixed64 = 110;
- optional sfixed32 no_default_sfixed32 = 111;
- optional sfixed64 no_default_sfixed64 = 112;
- optional bool no_default_bool = 113;
- optional string no_default_string = 114;
- optional bytes no_default_bytes = 115;
- optional DefaultsMessage.DefaultsEnum no_default_enum = 116;
-
- optional double default_double = 201 [default = 3.1415];
- optional float default_float = 202 [default = 3.14];
- optional int32 default_int32 = 203 [default = 42];
- optional int64 default_int64 = 204 [default = 43];
- optional uint32 default_uint32 = 205 [default = 44];
- optional uint64 default_uint64 = 206 [default = 45];
- optional sint32 default_sint32 = 207 [default = 46];
- optional sint64 default_sint64 = 208 [default = 47];
- optional fixed32 default_fixed32 = 209 [default = 48];
- optional fixed64 default_fixed64 = 210 [default = 49];
- optional sfixed32 default_sfixed32 = 211 [default = 50];
- optional sfixed64 default_sfixed64 = 212 [default = 51];
- optional bool default_bool = 213 [default = true];
- optional string default_string = 214 [default = "Hello, string"];
- optional bytes default_bytes = 215 [default = "Hello, bytes"];
- optional DefaultsMessage.DefaultsEnum default_enum = 216 [default = ONE];
-}
-
-message MyMessageSet {
- option message_set_wire_format = true;
- extensions 100 to max;
-}
-
-message Empty {
-}
-
-extend MyMessageSet {
- optional Empty x201 = 201;
- optional Empty x202 = 202;
- optional Empty x203 = 203;
- optional Empty x204 = 204;
- optional Empty x205 = 205;
- optional Empty x206 = 206;
- optional Empty x207 = 207;
- optional Empty x208 = 208;
- optional Empty x209 = 209;
- optional Empty x210 = 210;
- optional Empty x211 = 211;
- optional Empty x212 = 212;
- optional Empty x213 = 213;
- optional Empty x214 = 214;
- optional Empty x215 = 215;
- optional Empty x216 = 216;
- optional Empty x217 = 217;
- optional Empty x218 = 218;
- optional Empty x219 = 219;
- optional Empty x220 = 220;
- optional Empty x221 = 221;
- optional Empty x222 = 222;
- optional Empty x223 = 223;
- optional Empty x224 = 224;
- optional Empty x225 = 225;
- optional Empty x226 = 226;
- optional Empty x227 = 227;
- optional Empty x228 = 228;
- optional Empty x229 = 229;
- optional Empty x230 = 230;
- optional Empty x231 = 231;
- optional Empty x232 = 232;
- optional Empty x233 = 233;
- optional Empty x234 = 234;
- optional Empty x235 = 235;
- optional Empty x236 = 236;
- optional Empty x237 = 237;
- optional Empty x238 = 238;
- optional Empty x239 = 239;
- optional Empty x240 = 240;
- optional Empty x241 = 241;
- optional Empty x242 = 242;
- optional Empty x243 = 243;
- optional Empty x244 = 244;
- optional Empty x245 = 245;
- optional Empty x246 = 246;
- optional Empty x247 = 247;
- optional Empty x248 = 248;
- optional Empty x249 = 249;
- optional Empty x250 = 250;
-}
-
-message MessageList {
- repeated group Message = 1 {
- required string name = 2;
- required int32 count = 3;
- }
-}
-
-message Strings {
- optional string string_field = 1;
- optional bytes bytes_field = 2;
-}
-
-message Defaults {
- enum Color {
- RED = 0;
- GREEN = 1;
- BLUE = 2;
- }
-
- // Default-valued fields of all basic types.
- // Same as GoTest, but copied here to make testing easier.
- optional bool F_Bool = 1 [default=true];
- optional int32 F_Int32 = 2 [default=32];
- optional int64 F_Int64 = 3 [default=64];
- optional fixed32 F_Fixed32 = 4 [default=320];
- optional fixed64 F_Fixed64 = 5 [default=640];
- optional uint32 F_Uint32 = 6 [default=3200];
- optional uint64 F_Uint64 = 7 [default=6400];
- optional float F_Float = 8 [default=314159.];
- optional double F_Double = 9 [default=271828.];
- optional string F_String = 10 [default="hello, \"world!\"\n"];
- optional bytes F_Bytes = 11 [default="Bignose"];
- optional sint32 F_Sint32 = 12 [default=-32];
- optional sint64 F_Sint64 = 13 [default=-64];
- optional Color F_Enum = 14 [default=GREEN];
-
- // More fields with crazy defaults.
- optional float F_Pinf = 15 [default=inf];
- optional float F_Ninf = 16 [default=-inf];
- optional float F_Nan = 17 [default=nan];
-
- // Sub-message.
- optional SubDefaults sub = 18;
-
- // Redundant but explicit defaults.
- optional string str_zero = 19 [default=""];
-}
-
-message SubDefaults {
- optional int64 n = 1 [default=7];
-}
-
-message RepeatedEnum {
- enum Color {
- RED = 1;
- }
- repeated Color color = 1;
-}
-
-message MoreRepeated {
- repeated bool bools = 1;
- repeated bool bools_packed = 2 [packed=true];
- repeated int32 ints = 3;
- repeated int32 ints_packed = 4 [packed=true];
- repeated int64 int64s_packed = 7 [packed=true];
- repeated string strings = 5;
- repeated fixed32 fixeds = 6;
-}
-
-// GroupOld and GroupNew have the same wire format.
-// GroupNew has a new field inside a group.
-
-message GroupOld {
- optional group G = 101 {
- optional int32 x = 2;
- }
-}
-
-message GroupNew {
- optional group G = 101 {
- optional int32 x = 2;
- optional int32 y = 3;
- }
-}
-
-message FloatingPoint {
- required double f = 1;
- optional bool exact = 2;
-}
-
-message MessageWithMap {
- map name_mapping = 1;
- map msg_mapping = 2;
- map byte_mapping = 3;
- map str_to_str = 4;
-}
-
-message Oneof {
- oneof union {
- bool F_Bool = 1;
- int32 F_Int32 = 2;
- int64 F_Int64 = 3;
- fixed32 F_Fixed32 = 4;
- fixed64 F_Fixed64 = 5;
- uint32 F_Uint32 = 6;
- uint64 F_Uint64 = 7;
- float F_Float = 8;
- double F_Double = 9;
- string F_String = 10;
- bytes F_Bytes = 11;
- sint32 F_Sint32 = 12;
- sint64 F_Sint64 = 13;
- MyMessage.Color F_Enum = 14;
- GoTestField F_Message = 15;
- group F_Group = 16 {
- optional int32 x = 17;
- }
- int32 F_Largest_Tag = 536870911;
- }
-
- oneof tormato {
- int32 value = 100;
- }
-}
-
-message Communique {
- optional bool make_me_cry = 1;
-
- // This is a oneof, called "union".
- oneof union {
- int32 number = 5;
- string name = 6;
- bytes data = 7;
- double temp_c = 8;
- MyMessage.Color col = 9;
- Strings msg = 10;
- }
-}
diff --git a/vendor/src/github.com/golang/protobuf/proto/text.go b/vendor/src/github.com/golang/protobuf/proto/text.go
deleted file mode 100644
index 965876b..0000000
--- a/vendor/src/github.com/golang/protobuf/proto/text.go
+++ /dev/null
@@ -1,854 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-// Functions for writing the text protocol buffer format.
-
-import (
- "bufio"
- "bytes"
- "encoding"
- "errors"
- "fmt"
- "io"
- "log"
- "math"
- "reflect"
- "sort"
- "strings"
-)
-
-var (
- newline = []byte("\n")
- spaces = []byte(" ")
- gtNewline = []byte(">\n")
- endBraceNewline = []byte("}\n")
- backslashN = []byte{'\\', 'n'}
- backslashR = []byte{'\\', 'r'}
- backslashT = []byte{'\\', 't'}
- backslashDQ = []byte{'\\', '"'}
- backslashBS = []byte{'\\', '\\'}
- posInf = []byte("inf")
- negInf = []byte("-inf")
- nan = []byte("nan")
-)
-
-type writer interface {
- io.Writer
- WriteByte(byte) error
-}
-
-// textWriter is an io.Writer that tracks its indentation level.
-type textWriter struct {
- ind int
- complete bool // if the current position is a complete line
- compact bool // whether to write out as a one-liner
- w writer
-}
-
-func (w *textWriter) WriteString(s string) (n int, err error) {
- if !strings.Contains(s, "\n") {
- if !w.compact && w.complete {
- w.writeIndent()
- }
- w.complete = false
- return io.WriteString(w.w, s)
- }
- // WriteString is typically called without newlines, so this
- // codepath and its copy are rare. We copy to avoid
- // duplicating all of Write's logic here.
- return w.Write([]byte(s))
-}
-
-func (w *textWriter) Write(p []byte) (n int, err error) {
- newlines := bytes.Count(p, newline)
- if newlines == 0 {
- if !w.compact && w.complete {
- w.writeIndent()
- }
- n, err = w.w.Write(p)
- w.complete = false
- return n, err
- }
-
- frags := bytes.SplitN(p, newline, newlines+1)
- if w.compact {
- for i, frag := range frags {
- if i > 0 {
- if err := w.w.WriteByte(' '); err != nil {
- return n, err
- }
- n++
- }
- nn, err := w.w.Write(frag)
- n += nn
- if err != nil {
- return n, err
- }
- }
- return n, nil
- }
-
- for i, frag := range frags {
- if w.complete {
- w.writeIndent()
- }
- nn, err := w.w.Write(frag)
- n += nn
- if err != nil {
- return n, err
- }
- if i+1 < len(frags) {
- if err := w.w.WriteByte('\n'); err != nil {
- return n, err
- }
- n++
- }
- }
- w.complete = len(frags[len(frags)-1]) == 0
- return n, nil
-}
-
-func (w *textWriter) WriteByte(c byte) error {
- if w.compact && c == '\n' {
- c = ' '
- }
- if !w.compact && w.complete {
- w.writeIndent()
- }
- err := w.w.WriteByte(c)
- w.complete = c == '\n'
- return err
-}
-
-func (w *textWriter) indent() { w.ind++ }
-
-func (w *textWriter) unindent() {
- if w.ind == 0 {
- log.Print("proto: textWriter unindented too far")
- return
- }
- w.ind--
-}
-
-func writeName(w *textWriter, props *Properties) error {
- if _, err := w.WriteString(props.OrigName); err != nil {
- return err
- }
- if props.Wire != "group" {
- return w.WriteByte(':')
- }
- return nil
-}
-
-// raw is the interface satisfied by RawMessage.
-type raw interface {
- Bytes() []byte
-}
-
-func requiresQuotes(u string) bool {
- // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted.
- for _, ch := range u {
- switch {
- case ch == '.' || ch == '/' || ch == '_':
- continue
- case '0' <= ch && ch <= '9':
- continue
- case 'A' <= ch && ch <= 'Z':
- continue
- case 'a' <= ch && ch <= 'z':
- continue
- default:
- return true
- }
- }
- return false
-}
-
-// isAny reports whether sv is a google.protobuf.Any message
-func isAny(sv reflect.Value) bool {
- type wkt interface {
- XXX_WellKnownType() string
- }
- t, ok := sv.Addr().Interface().(wkt)
- return ok && t.XXX_WellKnownType() == "Any"
-}
-
-// writeProto3Any writes an expanded google.protobuf.Any message.
-//
-// It returns (false, nil) if sv value can't be unmarshaled (e.g. because
-// required messages are not linked in).
-//
-// It returns (true, error) when sv was written in expanded format or an error
-// was encountered.
-func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) {
- turl := sv.FieldByName("TypeUrl")
- val := sv.FieldByName("Value")
- if !turl.IsValid() || !val.IsValid() {
- return true, errors.New("proto: invalid google.protobuf.Any message")
- }
-
- b, ok := val.Interface().([]byte)
- if !ok {
- return true, errors.New("proto: invalid google.protobuf.Any message")
- }
-
- parts := strings.Split(turl.String(), "/")
- mt := MessageType(parts[len(parts)-1])
- if mt == nil {
- return false, nil
- }
- m := reflect.New(mt.Elem())
- if err := Unmarshal(b, m.Interface().(Message)); err != nil {
- return false, nil
- }
- w.Write([]byte("["))
- u := turl.String()
- if requiresQuotes(u) {
- writeString(w, u)
- } else {
- w.Write([]byte(u))
- }
- if w.compact {
- w.Write([]byte("]:<"))
- } else {
- w.Write([]byte("]: <\n"))
- w.ind++
- }
- if err := tm.writeStruct(w, m.Elem()); err != nil {
- return true, err
- }
- if w.compact {
- w.Write([]byte("> "))
- } else {
- w.ind--
- w.Write([]byte(">\n"))
- }
- return true, nil
-}
-
-func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
- if tm.ExpandAny && isAny(sv) {
- if canExpand, err := tm.writeProto3Any(w, sv); canExpand {
- return err
- }
- }
- st := sv.Type()
- sprops := GetProperties(st)
- for i := 0; i < sv.NumField(); i++ {
- fv := sv.Field(i)
- props := sprops.Prop[i]
- name := st.Field(i).Name
-
- if strings.HasPrefix(name, "XXX_") {
- // There are two XXX_ fields:
- // XXX_unrecognized []byte
- // XXX_extensions map[int32]proto.Extension
- // The first is handled here;
- // the second is handled at the bottom of this function.
- if name == "XXX_unrecognized" && !fv.IsNil() {
- if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {
- return err
- }
- }
- continue
- }
- if fv.Kind() == reflect.Ptr && fv.IsNil() {
- // Field not filled in. This could be an optional field or
- // a required field that wasn't filled in. Either way, there
- // isn't anything we can show for it.
- continue
- }
- if fv.Kind() == reflect.Slice && fv.IsNil() {
- // Repeated field that is empty, or a bytes field that is unused.
- continue
- }
-
- if props.Repeated && fv.Kind() == reflect.Slice {
- // Repeated field.
- for j := 0; j < fv.Len(); j++ {
- if err := writeName(w, props); err != nil {
- return err
- }
- if !w.compact {
- if err := w.WriteByte(' '); err != nil {
- return err
- }
- }
- v := fv.Index(j)
- if v.Kind() == reflect.Ptr && v.IsNil() {
- // A nil message in a repeated field is not valid,
- // but we can handle that more gracefully than panicking.
- if _, err := w.Write([]byte("\n")); err != nil {
- return err
- }
- continue
- }
- if err := tm.writeAny(w, v, props); err != nil {
- return err
- }
- if err := w.WriteByte('\n'); err != nil {
- return err
- }
- }
- continue
- }
- if fv.Kind() == reflect.Map {
- // Map fields are rendered as a repeated struct with key/value fields.
- keys := fv.MapKeys()
- sort.Sort(mapKeys(keys))
- for _, key := range keys {
- val := fv.MapIndex(key)
- if err := writeName(w, props); err != nil {
- return err
- }
- if !w.compact {
- if err := w.WriteByte(' '); err != nil {
- return err
- }
- }
- // open struct
- if err := w.WriteByte('<'); err != nil {
- return err
- }
- if !w.compact {
- if err := w.WriteByte('\n'); err != nil {
- return err
- }
- }
- w.indent()
- // key
- if _, err := w.WriteString("key:"); err != nil {
- return err
- }
- if !w.compact {
- if err := w.WriteByte(' '); err != nil {
- return err
- }
- }
- if err := tm.writeAny(w, key, props.mkeyprop); err != nil {
- return err
- }
- if err := w.WriteByte('\n'); err != nil {
- return err
- }
- // nil values aren't legal, but we can avoid panicking because of them.
- if val.Kind() != reflect.Ptr || !val.IsNil() {
- // value
- if _, err := w.WriteString("value:"); err != nil {
- return err
- }
- if !w.compact {
- if err := w.WriteByte(' '); err != nil {
- return err
- }
- }
- if err := tm.writeAny(w, val, props.mvalprop); err != nil {
- return err
- }
- if err := w.WriteByte('\n'); err != nil {
- return err
- }
- }
- // close struct
- w.unindent()
- if err := w.WriteByte('>'); err != nil {
- return err
- }
- if err := w.WriteByte('\n'); err != nil {
- return err
- }
- }
- continue
- }
- if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 {
- // empty bytes field
- continue
- }
- if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {
- // proto3 non-repeated scalar field; skip if zero value
- if isProto3Zero(fv) {
- continue
- }
- }
-
- if fv.Kind() == reflect.Interface {
- // Check if it is a oneof.
- if st.Field(i).Tag.Get("protobuf_oneof") != "" {
- // fv is nil, or holds a pointer to generated struct.
- // That generated struct has exactly one field,
- // which has a protobuf struct tag.
- if fv.IsNil() {
- continue
- }
- inner := fv.Elem().Elem() // interface -> *T -> T
- tag := inner.Type().Field(0).Tag.Get("protobuf")
- props = new(Properties) // Overwrite the outer props var, but not its pointee.
- props.Parse(tag)
- // Write the value in the oneof, not the oneof itself.
- fv = inner.Field(0)
-
- // Special case to cope with malformed messages gracefully:
- // If the value in the oneof is a nil pointer, don't panic
- // in writeAny.
- if fv.Kind() == reflect.Ptr && fv.IsNil() {
- // Use errors.New so writeAny won't render quotes.
- msg := errors.New("/* nil */")
- fv = reflect.ValueOf(&msg).Elem()
- }
- }
- }
-
- if err := writeName(w, props); err != nil {
- return err
- }
- if !w.compact {
- if err := w.WriteByte(' '); err != nil {
- return err
- }
- }
- if b, ok := fv.Interface().(raw); ok {
- if err := writeRaw(w, b.Bytes()); err != nil {
- return err
- }
- continue
- }
-
- // Enums have a String method, so writeAny will work fine.
- if err := tm.writeAny(w, fv, props); err != nil {
- return err
- }
-
- if err := w.WriteByte('\n'); err != nil {
- return err
- }
- }
-
- // Extensions (the XXX_extensions field).
- pv := sv.Addr()
- if _, ok := extendable(pv.Interface()); ok {
- if err := tm.writeExtensions(w, pv); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// writeRaw writes an uninterpreted raw message.
-func writeRaw(w *textWriter, b []byte) error {
- if err := w.WriteByte('<'); err != nil {
- return err
- }
- if !w.compact {
- if err := w.WriteByte('\n'); err != nil {
- return err
- }
- }
- w.indent()
- if err := writeUnknownStruct(w, b); err != nil {
- return err
- }
- w.unindent()
- if err := w.WriteByte('>'); err != nil {
- return err
- }
- return nil
-}
-
-// writeAny writes an arbitrary field.
-func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
- v = reflect.Indirect(v)
-
- // Floats have special cases.
- if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
- x := v.Float()
- var b []byte
- switch {
- case math.IsInf(x, 1):
- b = posInf
- case math.IsInf(x, -1):
- b = negInf
- case math.IsNaN(x):
- b = nan
- }
- if b != nil {
- _, err := w.Write(b)
- return err
- }
- // Other values are handled below.
- }
-
- // We don't attempt to serialise every possible value type; only those
- // that can occur in protocol buffers.
- switch v.Kind() {
- case reflect.Slice:
- // Should only be a []byte; repeated fields are handled in writeStruct.
- if err := writeString(w, string(v.Bytes())); err != nil {
- return err
- }
- case reflect.String:
- if err := writeString(w, v.String()); err != nil {
- return err
- }
- case reflect.Struct:
- // Required/optional group/message.
- var bra, ket byte = '<', '>'
- if props != nil && props.Wire == "group" {
- bra, ket = '{', '}'
- }
- if err := w.WriteByte(bra); err != nil {
- return err
- }
- if !w.compact {
- if err := w.WriteByte('\n'); err != nil {
- return err
- }
- }
- w.indent()
- if etm, ok := v.Interface().(encoding.TextMarshaler); ok {
- text, err := etm.MarshalText()
- if err != nil {
- return err
- }
- if _, err = w.Write(text); err != nil {
- return err
- }
- } else if err := tm.writeStruct(w, v); err != nil {
- return err
- }
- w.unindent()
- if err := w.WriteByte(ket); err != nil {
- return err
- }
- default:
- _, err := fmt.Fprint(w, v.Interface())
- return err
- }
- return nil
-}
-
-// equivalent to C's isprint.
-func isprint(c byte) bool {
- return c >= 0x20 && c < 0x7f
-}
-
-// writeString writes a string in the protocol buffer text format.
-// It is similar to strconv.Quote except we don't use Go escape sequences,
-// we treat the string as a byte sequence, and we use octal escapes.
-// These differences are to maintain interoperability with the other
-// languages' implementations of the text format.
-func writeString(w *textWriter, s string) error {
- // use WriteByte here to get any needed indent
- if err := w.WriteByte('"'); err != nil {
- return err
- }
- // Loop over the bytes, not the runes.
- for i := 0; i < len(s); i++ {
- var err error
- // Divergence from C++: we don't escape apostrophes.
- // There's no need to escape them, and the C++ parser
- // copes with a naked apostrophe.
- switch c := s[i]; c {
- case '\n':
- _, err = w.w.Write(backslashN)
- case '\r':
- _, err = w.w.Write(backslashR)
- case '\t':
- _, err = w.w.Write(backslashT)
- case '"':
- _, err = w.w.Write(backslashDQ)
- case '\\':
- _, err = w.w.Write(backslashBS)
- default:
- if isprint(c) {
- err = w.w.WriteByte(c)
- } else {
- _, err = fmt.Fprintf(w.w, "\\%03o", c)
- }
- }
- if err != nil {
- return err
- }
- }
- return w.WriteByte('"')
-}
-
-func writeUnknownStruct(w *textWriter, data []byte) (err error) {
- if !w.compact {
- if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil {
- return err
- }
- }
- b := NewBuffer(data)
- for b.index < len(b.buf) {
- x, err := b.DecodeVarint()
- if err != nil {
- _, err := fmt.Fprintf(w, "/* %v */\n", err)
- return err
- }
- wire, tag := x&7, x>>3
- if wire == WireEndGroup {
- w.unindent()
- if _, err := w.Write(endBraceNewline); err != nil {
- return err
- }
- continue
- }
- if _, err := fmt.Fprint(w, tag); err != nil {
- return err
- }
- if wire != WireStartGroup {
- if err := w.WriteByte(':'); err != nil {
- return err
- }
- }
- if !w.compact || wire == WireStartGroup {
- if err := w.WriteByte(' '); err != nil {
- return err
- }
- }
- switch wire {
- case WireBytes:
- buf, e := b.DecodeRawBytes(false)
- if e == nil {
- _, err = fmt.Fprintf(w, "%q", buf)
- } else {
- _, err = fmt.Fprintf(w, "/* %v */", e)
- }
- case WireFixed32:
- x, err = b.DecodeFixed32()
- err = writeUnknownInt(w, x, err)
- case WireFixed64:
- x, err = b.DecodeFixed64()
- err = writeUnknownInt(w, x, err)
- case WireStartGroup:
- err = w.WriteByte('{')
- w.indent()
- case WireVarint:
- x, err = b.DecodeVarint()
- err = writeUnknownInt(w, x, err)
- default:
- _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire)
- }
- if err != nil {
- return err
- }
- if err = w.WriteByte('\n'); err != nil {
- return err
- }
- }
- return nil
-}
-
-func writeUnknownInt(w *textWriter, x uint64, err error) error {
- if err == nil {
- _, err = fmt.Fprint(w, x)
- } else {
- _, err = fmt.Fprintf(w, "/* %v */", err)
- }
- return err
-}
-
-type int32Slice []int32
-
-func (s int32Slice) Len() int { return len(s) }
-func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
-func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-
-// writeExtensions writes all the extensions in pv.
-// pv is assumed to be a pointer to a protocol message struct that is extendable.
-func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error {
- emap := extensionMaps[pv.Type().Elem()]
- ep, _ := extendable(pv.Interface())
-
- // Order the extensions by ID.
- // This isn't strictly necessary, but it will give us
- // canonical output, which will also make testing easier.
- m, mu := ep.extensionsRead()
- if m == nil {
- return nil
- }
- mu.Lock()
- ids := make([]int32, 0, len(m))
- for id := range m {
- ids = append(ids, id)
- }
- sort.Sort(int32Slice(ids))
- mu.Unlock()
-
- for _, extNum := range ids {
- ext := m[extNum]
- var desc *ExtensionDesc
- if emap != nil {
- desc = emap[extNum]
- }
- if desc == nil {
- // Unknown extension.
- if err := writeUnknownStruct(w, ext.enc); err != nil {
- return err
- }
- continue
- }
-
- pb, err := GetExtension(ep, desc)
- if err != nil {
- return fmt.Errorf("failed getting extension: %v", err)
- }
-
- // Repeated extensions will appear as a slice.
- if !desc.repeated() {
- if err := tm.writeExtension(w, desc.Name, pb); err != nil {
- return err
- }
- } else {
- v := reflect.ValueOf(pb)
- for i := 0; i < v.Len(); i++ {
- if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
- return err
- }
- }
- }
- }
- return nil
-}
-
-func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error {
- if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
- return err
- }
- if !w.compact {
- if err := w.WriteByte(' '); err != nil {
- return err
- }
- }
- if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil {
- return err
- }
- if err := w.WriteByte('\n'); err != nil {
- return err
- }
- return nil
-}
-
-func (w *textWriter) writeIndent() {
- if !w.complete {
- return
- }
- remain := w.ind * 2
- for remain > 0 {
- n := remain
- if n > len(spaces) {
- n = len(spaces)
- }
- w.w.Write(spaces[:n])
- remain -= n
- }
- w.complete = false
-}
-
-// TextMarshaler is a configurable text format marshaler.
-type TextMarshaler struct {
- Compact bool // use compact text format (one line).
- ExpandAny bool // expand google.protobuf.Any messages of known types
-}
-
-// Marshal writes a given protocol buffer in text format.
-// The only errors returned are from w.
-func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
- val := reflect.ValueOf(pb)
- if pb == nil || val.IsNil() {
- w.Write([]byte(""))
- return nil
- }
- var bw *bufio.Writer
- ww, ok := w.(writer)
- if !ok {
- bw = bufio.NewWriter(w)
- ww = bw
- }
- aw := &textWriter{
- w: ww,
- complete: true,
- compact: tm.Compact,
- }
-
- if etm, ok := pb.(encoding.TextMarshaler); ok {
- text, err := etm.MarshalText()
- if err != nil {
- return err
- }
- if _, err = aw.Write(text); err != nil {
- return err
- }
- if bw != nil {
- return bw.Flush()
- }
- return nil
- }
- // Dereference the received pointer so we don't have outer < and >.
- v := reflect.Indirect(val)
- if err := tm.writeStruct(aw, v); err != nil {
- return err
- }
- if bw != nil {
- return bw.Flush()
- }
- return nil
-}
-
-// Text is the same as Marshal, but returns the string directly.
-func (tm *TextMarshaler) Text(pb Message) string {
- var buf bytes.Buffer
- tm.Marshal(&buf, pb)
- return buf.String()
-}
-
-var (
- defaultTextMarshaler = TextMarshaler{}
- compactTextMarshaler = TextMarshaler{Compact: true}
-)
-
-// TODO: consider removing some of the Marshal functions below.
-
-// MarshalText writes a given protocol buffer in text format.
-// The only errors returned are from w.
-func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) }
-
-// MarshalTextString is the same as MarshalText, but returns the string directly.
-func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) }
-
-// CompactText writes a given protocol buffer in compact text format (one line).
-func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) }
-
-// CompactTextString is the same as CompactText, but returns the string directly.
-func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) }
diff --git a/vendor/src/github.com/golang/protobuf/proto/text_parser.go b/vendor/src/github.com/golang/protobuf/proto/text_parser.go
deleted file mode 100644
index 4fd0531..0000000
--- a/vendor/src/github.com/golang/protobuf/proto/text_parser.go
+++ /dev/null
@@ -1,891 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-// Functions for parsing the Text protocol buffer format.
-// TODO: message sets.
-
-import (
- "encoding"
- "errors"
- "fmt"
- "reflect"
- "strconv"
- "strings"
- "unicode/utf8"
-)
-
-// Error string emitted when deserializing Any and fields are already set
-const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set"
-
-type ParseError struct {
- Message string
- Line int // 1-based line number
- Offset int // 0-based byte offset from start of input
-}
-
-func (p *ParseError) Error() string {
- if p.Line == 1 {
- // show offset only for first line
- return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message)
- }
- return fmt.Sprintf("line %d: %v", p.Line, p.Message)
-}
-
-type token struct {
- value string
- err *ParseError
- line int // line number
- offset int // byte number from start of input, not start of line
- unquoted string // the unquoted version of value, if it was a quoted string
-}
-
-func (t *token) String() string {
- if t.err == nil {
- return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset)
- }
- return fmt.Sprintf("parse error: %v", t.err)
-}
-
-type textParser struct {
- s string // remaining input
- done bool // whether the parsing is finished (success or error)
- backed bool // whether back() was called
- offset, line int
- cur token
-}
-
-func newTextParser(s string) *textParser {
- p := new(textParser)
- p.s = s
- p.line = 1
- p.cur.line = 1
- return p
-}
-
-func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
- pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
- p.cur.err = pe
- p.done = true
- return pe
-}
-
-// Numbers and identifiers are matched by [-+._A-Za-z0-9]
-func isIdentOrNumberChar(c byte) bool {
- switch {
- case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
- return true
- case '0' <= c && c <= '9':
- return true
- }
- switch c {
- case '-', '+', '.', '_':
- return true
- }
- return false
-}
-
-func isWhitespace(c byte) bool {
- switch c {
- case ' ', '\t', '\n', '\r':
- return true
- }
- return false
-}
-
-func isQuote(c byte) bool {
- switch c {
- case '"', '\'':
- return true
- }
- return false
-}
-
-func (p *textParser) skipWhitespace() {
- i := 0
- for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
- if p.s[i] == '#' {
- // comment; skip to end of line or input
- for i < len(p.s) && p.s[i] != '\n' {
- i++
- }
- if i == len(p.s) {
- break
- }
- }
- if p.s[i] == '\n' {
- p.line++
- }
- i++
- }
- p.offset += i
- p.s = p.s[i:len(p.s)]
- if len(p.s) == 0 {
- p.done = true
- }
-}
-
-func (p *textParser) advance() {
- // Skip whitespace
- p.skipWhitespace()
- if p.done {
- return
- }
-
- // Start of non-whitespace
- p.cur.err = nil
- p.cur.offset, p.cur.line = p.offset, p.line
- p.cur.unquoted = ""
- switch p.s[0] {
- case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
- // Single symbol
- p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
- case '"', '\'':
- // Quoted string
- i := 1
- for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
- if p.s[i] == '\\' && i+1 < len(p.s) {
- // skip escaped char
- i++
- }
- i++
- }
- if i >= len(p.s) || p.s[i] != p.s[0] {
- p.errorf("unmatched quote")
- return
- }
- unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
- if err != nil {
- p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
- return
- }
- p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
- p.cur.unquoted = unq
- default:
- i := 0
- for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
- i++
- }
- if i == 0 {
- p.errorf("unexpected byte %#x", p.s[0])
- return
- }
- p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
- }
- p.offset += len(p.cur.value)
-}
-
-var (
- errBadUTF8 = errors.New("proto: bad UTF-8")
- errBadHex = errors.New("proto: bad hexadecimal")
-)
-
-func unquoteC(s string, quote rune) (string, error) {
- // This is based on C++'s tokenizer.cc.
- // Despite its name, this is *not* parsing C syntax.
- // For instance, "\0" is an invalid quoted string.
-
- // Avoid allocation in trivial cases.
- simple := true
- for _, r := range s {
- if r == '\\' || r == quote {
- simple = false
- break
- }
- }
- if simple {
- return s, nil
- }
-
- buf := make([]byte, 0, 3*len(s)/2)
- for len(s) > 0 {
- r, n := utf8.DecodeRuneInString(s)
- if r == utf8.RuneError && n == 1 {
- return "", errBadUTF8
- }
- s = s[n:]
- if r != '\\' {
- if r < utf8.RuneSelf {
- buf = append(buf, byte(r))
- } else {
- buf = append(buf, string(r)...)
- }
- continue
- }
-
- ch, tail, err := unescape(s)
- if err != nil {
- return "", err
- }
- buf = append(buf, ch...)
- s = tail
- }
- return string(buf), nil
-}
-
-func unescape(s string) (ch string, tail string, err error) {
- r, n := utf8.DecodeRuneInString(s)
- if r == utf8.RuneError && n == 1 {
- return "", "", errBadUTF8
- }
- s = s[n:]
- switch r {
- case 'a':
- return "\a", s, nil
- case 'b':
- return "\b", s, nil
- case 'f':
- return "\f", s, nil
- case 'n':
- return "\n", s, nil
- case 'r':
- return "\r", s, nil
- case 't':
- return "\t", s, nil
- case 'v':
- return "\v", s, nil
- case '?':
- return "?", s, nil // trigraph workaround
- case '\'', '"', '\\':
- return string(r), s, nil
- case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X':
- if len(s) < 2 {
- return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
- }
- base := 8
- ss := s[:2]
- s = s[2:]
- if r == 'x' || r == 'X' {
- base = 16
- } else {
- ss = string(r) + ss
- }
- i, err := strconv.ParseUint(ss, base, 8)
- if err != nil {
- return "", "", err
- }
- return string([]byte{byte(i)}), s, nil
- case 'u', 'U':
- n := 4
- if r == 'U' {
- n = 8
- }
- if len(s) < n {
- return "", "", fmt.Errorf(`\%c requires %d digits`, r, n)
- }
-
- bs := make([]byte, n/2)
- for i := 0; i < n; i += 2 {
- a, ok1 := unhex(s[i])
- b, ok2 := unhex(s[i+1])
- if !ok1 || !ok2 {
- return "", "", errBadHex
- }
- bs[i/2] = a<<4 | b
- }
- s = s[n:]
- return string(bs), s, nil
- }
- return "", "", fmt.Errorf(`unknown escape \%c`, r)
-}
-
-// Adapted from src/pkg/strconv/quote.go.
-func unhex(b byte) (v byte, ok bool) {
- switch {
- case '0' <= b && b <= '9':
- return b - '0', true
- case 'a' <= b && b <= 'f':
- return b - 'a' + 10, true
- case 'A' <= b && b <= 'F':
- return b - 'A' + 10, true
- }
- return 0, false
-}
-
-// Back off the parser by one token. Can only be done between calls to next().
-// It makes the next advance() a no-op.
-func (p *textParser) back() { p.backed = true }
-
-// Advances the parser and returns the new current token.
-func (p *textParser) next() *token {
- if p.backed || p.done {
- p.backed = false
- return &p.cur
- }
- p.advance()
- if p.done {
- p.cur.value = ""
- } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
- // Look for multiple quoted strings separated by whitespace,
- // and concatenate them.
- cat := p.cur
- for {
- p.skipWhitespace()
- if p.done || !isQuote(p.s[0]) {
- break
- }
- p.advance()
- if p.cur.err != nil {
- return &p.cur
- }
- cat.value += " " + p.cur.value
- cat.unquoted += p.cur.unquoted
- }
- p.done = false // parser may have seen EOF, but we want to return cat
- p.cur = cat
- }
- return &p.cur
-}
-
-func (p *textParser) consumeToken(s string) error {
- tok := p.next()
- if tok.err != nil {
- return tok.err
- }
- if tok.value != s {
- p.back()
- return p.errorf("expected %q, found %q", s, tok.value)
- }
- return nil
-}
-
-// Return a RequiredNotSetError indicating which required field was not set.
-func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
- st := sv.Type()
- sprops := GetProperties(st)
- for i := 0; i < st.NumField(); i++ {
- if !isNil(sv.Field(i)) {
- continue
- }
-
- props := sprops.Prop[i]
- if props.Required {
- return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
- }
- }
- return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen
-}
-
-// Returns the index in the struct for the named field, as well as the parsed tag properties.
-func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) {
- i, ok := sprops.decoderOrigNames[name]
- if ok {
- return i, sprops.Prop[i], true
- }
- return -1, nil, false
-}
-
-// Consume a ':' from the input stream (if the next token is a colon),
-// returning an error if a colon is needed but not present.
-func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {
- tok := p.next()
- if tok.err != nil {
- return tok.err
- }
- if tok.value != ":" {
- // Colon is optional when the field is a group or message.
- needColon := true
- switch props.Wire {
- case "group":
- needColon = false
- case "bytes":
- // A "bytes" field is either a message, a string, or a repeated field;
- // those three become *T, *string and []T respectively, so we can check for
- // this field being a pointer to a non-string.
- if typ.Kind() == reflect.Ptr {
- // *T or *string
- if typ.Elem().Kind() == reflect.String {
- break
- }
- } else if typ.Kind() == reflect.Slice {
- // []T or []*T
- if typ.Elem().Kind() != reflect.Ptr {
- break
- }
- } else if typ.Kind() == reflect.String {
- // The proto3 exception is for a string field,
- // which requires a colon.
- break
- }
- needColon = false
- }
- if needColon {
- return p.errorf("expected ':', found %q", tok.value)
- }
- p.back()
- }
- return nil
-}
-
-func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
- st := sv.Type()
- sprops := GetProperties(st)
- reqCount := sprops.reqCount
- var reqFieldErr error
- fieldSet := make(map[string]bool)
- // A struct is a sequence of "name: value", terminated by one of
- // '>' or '}', or the end of the input. A name may also be
- // "[extension]" or "[type/url]".
- //
- // The whole struct can also be an expanded Any message, like:
- // [type/url] < ... struct contents ... >
- for {
- tok := p.next()
- if tok.err != nil {
- return tok.err
- }
- if tok.value == terminator {
- break
- }
- if tok.value == "[" {
- // Looks like an extension or an Any.
- //
- // TODO: Check whether we need to handle
- // namespace rooted names (e.g. ".something.Foo").
- extName, err := p.consumeExtName()
- if err != nil {
- return err
- }
-
- if s := strings.LastIndex(extName, "/"); s >= 0 {
- // If it contains a slash, it's an Any type URL.
- messageName := extName[s+1:]
- mt := MessageType(messageName)
- if mt == nil {
- return p.errorf("unrecognized message %q in google.protobuf.Any", messageName)
- }
- tok = p.next()
- if tok.err != nil {
- return tok.err
- }
- // consume an optional colon
- if tok.value == ":" {
- tok = p.next()
- if tok.err != nil {
- return tok.err
- }
- }
- var terminator string
- switch tok.value {
- case "<":
- terminator = ">"
- case "{":
- terminator = "}"
- default:
- return p.errorf("expected '{' or '<', found %q", tok.value)
- }
- v := reflect.New(mt.Elem())
- if pe := p.readStruct(v.Elem(), terminator); pe != nil {
- return pe
- }
- b, err := Marshal(v.Interface().(Message))
- if err != nil {
- return p.errorf("failed to marshal message of type %q: %v", messageName, err)
- }
- if fieldSet["type_url"] {
- return p.errorf(anyRepeatedlyUnpacked, "type_url")
- }
- if fieldSet["value"] {
- return p.errorf(anyRepeatedlyUnpacked, "value")
- }
- sv.FieldByName("TypeUrl").SetString(extName)
- sv.FieldByName("Value").SetBytes(b)
- fieldSet["type_url"] = true
- fieldSet["value"] = true
- continue
- }
-
- var desc *ExtensionDesc
- // This could be faster, but it's functional.
- // TODO: Do something smarter than a linear scan.
- for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
- if d.Name == extName {
- desc = d
- break
- }
- }
- if desc == nil {
- return p.errorf("unrecognized extension %q", extName)
- }
-
- props := &Properties{}
- props.Parse(desc.Tag)
-
- typ := reflect.TypeOf(desc.ExtensionType)
- if err := p.checkForColon(props, typ); err != nil {
- return err
- }
-
- rep := desc.repeated()
-
- // Read the extension structure, and set it in
- // the value we're constructing.
- var ext reflect.Value
- if !rep {
- ext = reflect.New(typ).Elem()
- } else {
- ext = reflect.New(typ.Elem()).Elem()
- }
- if err := p.readAny(ext, props); err != nil {
- if _, ok := err.(*RequiredNotSetError); !ok {
- return err
- }
- reqFieldErr = err
- }
- ep := sv.Addr().Interface().(Message)
- if !rep {
- SetExtension(ep, desc, ext.Interface())
- } else {
- old, err := GetExtension(ep, desc)
- var sl reflect.Value
- if err == nil {
- sl = reflect.ValueOf(old) // existing slice
- } else {
- sl = reflect.MakeSlice(typ, 0, 1)
- }
- sl = reflect.Append(sl, ext)
- SetExtension(ep, desc, sl.Interface())
- }
- if err := p.consumeOptionalSeparator(); err != nil {
- return err
- }
- continue
- }
-
- // This is a normal, non-extension field.
- name := tok.value
- var dst reflect.Value
- fi, props, ok := structFieldByName(sprops, name)
- if ok {
- dst = sv.Field(fi)
- } else if oop, ok := sprops.OneofTypes[name]; ok {
- // It is a oneof.
- props = oop.Prop
- nv := reflect.New(oop.Type.Elem())
- dst = nv.Elem().Field(0)
- sv.Field(oop.Field).Set(nv)
- }
- if !dst.IsValid() {
- return p.errorf("unknown field name %q in %v", name, st)
- }
-
- if dst.Kind() == reflect.Map {
- // Consume any colon.
- if err := p.checkForColon(props, dst.Type()); err != nil {
- return err
- }
-
- // Construct the map if it doesn't already exist.
- if dst.IsNil() {
- dst.Set(reflect.MakeMap(dst.Type()))
- }
- key := reflect.New(dst.Type().Key()).Elem()
- val := reflect.New(dst.Type().Elem()).Elem()
-
- // The map entry should be this sequence of tokens:
- // < key : KEY value : VALUE >
- // However, implementations may omit key or value, and technically
- // we should support them in any order. See b/28924776 for a time
- // this went wrong.
-
- tok := p.next()
- var terminator string
- switch tok.value {
- case "<":
- terminator = ">"
- case "{":
- terminator = "}"
- default:
- return p.errorf("expected '{' or '<', found %q", tok.value)
- }
- for {
- tok := p.next()
- if tok.err != nil {
- return tok.err
- }
- if tok.value == terminator {
- break
- }
- switch tok.value {
- case "key":
- if err := p.consumeToken(":"); err != nil {
- return err
- }
- if err := p.readAny(key, props.mkeyprop); err != nil {
- return err
- }
- if err := p.consumeOptionalSeparator(); err != nil {
- return err
- }
- case "value":
- if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil {
- return err
- }
- if err := p.readAny(val, props.mvalprop); err != nil {
- return err
- }
- if err := p.consumeOptionalSeparator(); err != nil {
- return err
- }
- default:
- p.back()
- return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
- }
- }
-
- dst.SetMapIndex(key, val)
- continue
- }
-
- // Check that it's not already set if it's not a repeated field.
- if !props.Repeated && fieldSet[name] {
- return p.errorf("non-repeated field %q was repeated", name)
- }
-
- if err := p.checkForColon(props, dst.Type()); err != nil {
- return err
- }
-
- // Parse into the field.
- fieldSet[name] = true
- if err := p.readAny(dst, props); err != nil {
- if _, ok := err.(*RequiredNotSetError); !ok {
- return err
- }
- reqFieldErr = err
- }
- if props.Required {
- reqCount--
- }
-
- if err := p.consumeOptionalSeparator(); err != nil {
- return err
- }
-
- }
-
- if reqCount > 0 {
- return p.missingRequiredFieldError(sv)
- }
- return reqFieldErr
-}
-
-// consumeExtName consumes extension name or expanded Any type URL and the
-// following ']'. It returns the name or URL consumed.
-func (p *textParser) consumeExtName() (string, error) {
- tok := p.next()
- if tok.err != nil {
- return "", tok.err
- }
-
- // If extension name or type url is quoted, it's a single token.
- if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
- name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
- if err != nil {
- return "", err
- }
- return name, p.consumeToken("]")
- }
-
- // Consume everything up to "]"
- var parts []string
- for tok.value != "]" {
- parts = append(parts, tok.value)
- tok = p.next()
- if tok.err != nil {
- return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
- }
- }
- return strings.Join(parts, ""), nil
-}
-
-// consumeOptionalSeparator consumes an optional semicolon or comma.
-// It is used in readStruct to provide backward compatibility.
-func (p *textParser) consumeOptionalSeparator() error {
- tok := p.next()
- if tok.err != nil {
- return tok.err
- }
- if tok.value != ";" && tok.value != "," {
- p.back()
- }
- return nil
-}
-
-func (p *textParser) readAny(v reflect.Value, props *Properties) error {
- tok := p.next()
- if tok.err != nil {
- return tok.err
- }
- if tok.value == "" {
- return p.errorf("unexpected EOF")
- }
-
- switch fv := v; fv.Kind() {
- case reflect.Slice:
- at := v.Type()
- if at.Elem().Kind() == reflect.Uint8 {
- // Special case for []byte
- if tok.value[0] != '"' && tok.value[0] != '\'' {
- // Deliberately written out here, as the error after
- // this switch statement would write "invalid []byte: ...",
- // which is not as user-friendly.
- return p.errorf("invalid string: %v", tok.value)
- }
- bytes := []byte(tok.unquoted)
- fv.Set(reflect.ValueOf(bytes))
- return nil
- }
- // Repeated field.
- if tok.value == "[" {
- // Repeated field with list notation, like [1,2,3].
- for {
- fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
- err := p.readAny(fv.Index(fv.Len()-1), props)
- if err != nil {
- return err
- }
- tok := p.next()
- if tok.err != nil {
- return tok.err
- }
- if tok.value == "]" {
- break
- }
- if tok.value != "," {
- return p.errorf("Expected ']' or ',' found %q", tok.value)
- }
- }
- return nil
- }
- // One value of the repeated field.
- p.back()
- fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
- return p.readAny(fv.Index(fv.Len()-1), props)
- case reflect.Bool:
- // true/1/t/True or false/f/0/False.
- switch tok.value {
- case "true", "1", "t", "True":
- fv.SetBool(true)
- return nil
- case "false", "0", "f", "False":
- fv.SetBool(false)
- return nil
- }
- case reflect.Float32, reflect.Float64:
- v := tok.value
- // Ignore 'f' for compatibility with output generated by C++, but don't
- // remove 'f' when the value is "-inf" or "inf".
- if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" {
- v = v[:len(v)-1]
- }
- if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {
- fv.SetFloat(f)
- return nil
- }
- case reflect.Int32:
- if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
- fv.SetInt(x)
- return nil
- }
-
- if len(props.Enum) == 0 {
- break
- }
- m, ok := enumValueMaps[props.Enum]
- if !ok {
- break
- }
- x, ok := m[tok.value]
- if !ok {
- break
- }
- fv.SetInt(int64(x))
- return nil
- case reflect.Int64:
- if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
- fv.SetInt(x)
- return nil
- }
-
- case reflect.Ptr:
- // A basic field (indirected through pointer), or a repeated message/group
- p.back()
- fv.Set(reflect.New(fv.Type().Elem()))
- return p.readAny(fv.Elem(), props)
- case reflect.String:
- if tok.value[0] == '"' || tok.value[0] == '\'' {
- fv.SetString(tok.unquoted)
- return nil
- }
- case reflect.Struct:
- var terminator string
- switch tok.value {
- case "{":
- terminator = "}"
- case "<":
- terminator = ">"
- default:
- return p.errorf("expected '{' or '<', found %q", tok.value)
- }
- // TODO: Handle nested messages which implement encoding.TextUnmarshaler.
- return p.readStruct(fv, terminator)
- case reflect.Uint32:
- if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
- fv.SetUint(uint64(x))
- return nil
- }
- case reflect.Uint64:
- if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
- fv.SetUint(x)
- return nil
- }
- }
- return p.errorf("invalid %v: %v", v.Type(), tok.value)
-}
-
-// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
-// before starting to unmarshal, so any existing data in pb is always removed.
-// If a required field is not set and no other error occurs,
-// UnmarshalText returns *RequiredNotSetError.
-func UnmarshalText(s string, pb Message) error {
- if um, ok := pb.(encoding.TextUnmarshaler); ok {
- err := um.UnmarshalText([]byte(s))
- return err
- }
- pb.Reset()
- v := reflect.ValueOf(pb)
- if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil {
- return pe
- }
- return nil
-}
diff --git a/vendor/src/github.com/golang/protobuf/proto/text_parser_test.go b/vendor/src/github.com/golang/protobuf/proto/text_parser_test.go
deleted file mode 100644
index b26500b..0000000
--- a/vendor/src/github.com/golang/protobuf/proto/text_parser_test.go
+++ /dev/null
@@ -1,662 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto_test
-
-import (
- "math"
- "reflect"
- "testing"
-
- . "github.com/golang/protobuf/proto"
- proto3pb "github.com/golang/protobuf/proto/proto3_proto"
- . "github.com/golang/protobuf/proto/testdata"
-)
-
-type UnmarshalTextTest struct {
- in string
- err string // if "", no error expected
- out *MyMessage
-}
-
-func buildExtStructTest(text string) UnmarshalTextTest {
- msg := &MyMessage{
- Count: Int32(42),
- }
- SetExtension(msg, E_Ext_More, &Ext{
- Data: String("Hello, world!"),
- })
- return UnmarshalTextTest{in: text, out: msg}
-}
-
-func buildExtDataTest(text string) UnmarshalTextTest {
- msg := &MyMessage{
- Count: Int32(42),
- }
- SetExtension(msg, E_Ext_Text, String("Hello, world!"))
- SetExtension(msg, E_Ext_Number, Int32(1729))
- return UnmarshalTextTest{in: text, out: msg}
-}
-
-func buildExtRepStringTest(text string) UnmarshalTextTest {
- msg := &MyMessage{
- Count: Int32(42),
- }
- if err := SetExtension(msg, E_Greeting, []string{"bula", "hola"}); err != nil {
- panic(err)
- }
- return UnmarshalTextTest{in: text, out: msg}
-}
-
-var unMarshalTextTests = []UnmarshalTextTest{
- // Basic
- {
- in: " count:42\n name:\"Dave\" ",
- out: &MyMessage{
- Count: Int32(42),
- Name: String("Dave"),
- },
- },
-
- // Empty quoted string
- {
- in: `count:42 name:""`,
- out: &MyMessage{
- Count: Int32(42),
- Name: String(""),
- },
- },
-
- // Quoted string concatenation with double quotes
- {
- in: `count:42 name: "My name is "` + "\n" + `"elsewhere"`,
- out: &MyMessage{
- Count: Int32(42),
- Name: String("My name is elsewhere"),
- },
- },
-
- // Quoted string concatenation with single quotes
- {
- in: "count:42 name: 'My name is '\n'elsewhere'",
- out: &MyMessage{
- Count: Int32(42),
- Name: String("My name is elsewhere"),
- },
- },
-
- // Quoted string concatenations with mixed quotes
- {
- in: "count:42 name: 'My name is '\n\"elsewhere\"",
- out: &MyMessage{
- Count: Int32(42),
- Name: String("My name is elsewhere"),
- },
- },
- {
- in: "count:42 name: \"My name is \"\n'elsewhere'",
- out: &MyMessage{
- Count: Int32(42),
- Name: String("My name is elsewhere"),
- },
- },
-
- // Quoted string with escaped apostrophe
- {
- in: `count:42 name: "HOLIDAY - New Year\'s Day"`,
- out: &MyMessage{
- Count: Int32(42),
- Name: String("HOLIDAY - New Year's Day"),
- },
- },
-
- // Quoted string with single quote
- {
- in: `count:42 name: 'Roger "The Ramster" Ramjet'`,
- out: &MyMessage{
- Count: Int32(42),
- Name: String(`Roger "The Ramster" Ramjet`),
- },
- },
-
- // Quoted string with all the accepted special characters from the C++ test
- {
- in: `count:42 name: ` + "\"\\\"A string with \\' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and multiple spaces\"",
- out: &MyMessage{
- Count: Int32(42),
- Name: String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and multiple spaces"),
- },
- },
-
- // Quoted string with quoted backslash
- {
- in: `count:42 name: "\\'xyz"`,
- out: &MyMessage{
- Count: Int32(42),
- Name: String(`\'xyz`),
- },
- },
-
- // Quoted string with UTF-8 bytes.
- {
- in: "count:42 name: '\303\277\302\201\xAB'",
- out: &MyMessage{
- Count: Int32(42),
- Name: String("\303\277\302\201\xAB"),
- },
- },
-
- // Bad quoted string
- {
- in: `inner: < host: "\0" >` + "\n",
- err: `line 1.15: invalid quoted string "\0": \0 requires 2 following digits`,
- },
-
- // Number too large for int64
- {
- in: "count: 1 others { key: 123456789012345678901 }",
- err: "line 1.23: invalid int64: 123456789012345678901",
- },
-
- // Number too large for int32
- {
- in: "count: 1234567890123",
- err: "line 1.7: invalid int32: 1234567890123",
- },
-
- // Number in hexadecimal
- {
- in: "count: 0x2beef",
- out: &MyMessage{
- Count: Int32(0x2beef),
- },
- },
-
- // Number in octal
- {
- in: "count: 024601",
- out: &MyMessage{
- Count: Int32(024601),
- },
- },
-
- // Floating point number with "f" suffix
- {
- in: "count: 4 others:< weight: 17.0f >",
- out: &MyMessage{
- Count: Int32(4),
- Others: []*OtherMessage{
- {
- Weight: Float32(17),
- },
- },
- },
- },
-
- // Floating point positive infinity
- {
- in: "count: 4 bigfloat: inf",
- out: &MyMessage{
- Count: Int32(4),
- Bigfloat: Float64(math.Inf(1)),
- },
- },
-
- // Floating point negative infinity
- {
- in: "count: 4 bigfloat: -inf",
- out: &MyMessage{
- Count: Int32(4),
- Bigfloat: Float64(math.Inf(-1)),
- },
- },
-
- // Number too large for float32
- {
- in: "others:< weight: 12345678901234567890123456789012345678901234567890 >",
- err: "line 1.17: invalid float32: 12345678901234567890123456789012345678901234567890",
- },
-
- // Number posing as a quoted string
- {
- in: `inner: < host: 12 >` + "\n",
- err: `line 1.15: invalid string: 12`,
- },
-
- // Quoted string posing as int32
- {
- in: `count: "12"`,
- err: `line 1.7: invalid int32: "12"`,
- },
-
- // Quoted string posing a float32
- {
- in: `others:< weight: "17.4" >`,
- err: `line 1.17: invalid float32: "17.4"`,
- },
-
- // Enum
- {
- in: `count:42 bikeshed: BLUE`,
- out: &MyMessage{
- Count: Int32(42),
- Bikeshed: MyMessage_BLUE.Enum(),
- },
- },
-
- // Repeated field
- {
- in: `count:42 pet: "horsey" pet:"bunny"`,
- out: &MyMessage{
- Count: Int32(42),
- Pet: []string{"horsey", "bunny"},
- },
- },
-
- // Repeated field with list notation
- {
- in: `count:42 pet: ["horsey", "bunny"]`,
- out: &MyMessage{
- Count: Int32(42),
- Pet: []string{"horsey", "bunny"},
- },
- },
-
- // Repeated message with/without colon and <>/{}
- {
- in: `count:42 others:{} others{} others:<> others:{}`,
- out: &MyMessage{
- Count: Int32(42),
- Others: []*OtherMessage{
- {},
- {},
- {},
- {},
- },
- },
- },
-
- // Missing colon for inner message
- {
- in: `count:42 inner < host: "cauchy.syd" >`,
- out: &MyMessage{
- Count: Int32(42),
- Inner: &InnerMessage{
- Host: String("cauchy.syd"),
- },
- },
- },
-
- // Missing colon for string field
- {
- in: `name "Dave"`,
- err: `line 1.5: expected ':', found "\"Dave\""`,
- },
-
- // Missing colon for int32 field
- {
- in: `count 42`,
- err: `line 1.6: expected ':', found "42"`,
- },
-
- // Missing required field
- {
- in: `name: "Pawel"`,
- err: `proto: required field "testdata.MyMessage.count" not set`,
- out: &MyMessage{
- Name: String("Pawel"),
- },
- },
-
- // Missing required field in a required submessage
- {
- in: `count: 42 we_must_go_deeper < leo_finally_won_an_oscar <> >`,
- err: `proto: required field "testdata.InnerMessage.host" not set`,
- out: &MyMessage{
- Count: Int32(42),
- WeMustGoDeeper: &RequiredInnerMessage{LeoFinallyWonAnOscar: &InnerMessage{}},
- },
- },
-
- // Repeated non-repeated field
- {
- in: `name: "Rob" name: "Russ"`,
- err: `line 1.12: non-repeated field "name" was repeated`,
- },
-
- // Group
- {
- in: `count: 17 SomeGroup { group_field: 12 }`,
- out: &MyMessage{
- Count: Int32(17),
- Somegroup: &MyMessage_SomeGroup{
- GroupField: Int32(12),
- },
- },
- },
-
- // Semicolon between fields
- {
- in: `count:3;name:"Calvin"`,
- out: &MyMessage{
- Count: Int32(3),
- Name: String("Calvin"),
- },
- },
- // Comma between fields
- {
- in: `count:4,name:"Ezekiel"`,
- out: &MyMessage{
- Count: Int32(4),
- Name: String("Ezekiel"),
- },
- },
-
- // Boolean false
- {
- in: `count:42 inner { host: "example.com" connected: false }`,
- out: &MyMessage{
- Count: Int32(42),
- Inner: &InnerMessage{
- Host: String("example.com"),
- Connected: Bool(false),
- },
- },
- },
- // Boolean true
- {
- in: `count:42 inner { host: "example.com" connected: true }`,
- out: &MyMessage{
- Count: Int32(42),
- Inner: &InnerMessage{
- Host: String("example.com"),
- Connected: Bool(true),
- },
- },
- },
- // Boolean 0
- {
- in: `count:42 inner { host: "example.com" connected: 0 }`,
- out: &MyMessage{
- Count: Int32(42),
- Inner: &InnerMessage{
- Host: String("example.com"),
- Connected: Bool(false),
- },
- },
- },
- // Boolean 1
- {
- in: `count:42 inner { host: "example.com" connected: 1 }`,
- out: &MyMessage{
- Count: Int32(42),
- Inner: &InnerMessage{
- Host: String("example.com"),
- Connected: Bool(true),
- },
- },
- },
- // Boolean f
- {
- in: `count:42 inner { host: "example.com" connected: f }`,
- out: &MyMessage{
- Count: Int32(42),
- Inner: &InnerMessage{
- Host: String("example.com"),
- Connected: Bool(false),
- },
- },
- },
- // Boolean t
- {
- in: `count:42 inner { host: "example.com" connected: t }`,
- out: &MyMessage{
- Count: Int32(42),
- Inner: &InnerMessage{
- Host: String("example.com"),
- Connected: Bool(true),
- },
- },
- },
- // Boolean False
- {
- in: `count:42 inner { host: "example.com" connected: False }`,
- out: &MyMessage{
- Count: Int32(42),
- Inner: &InnerMessage{
- Host: String("example.com"),
- Connected: Bool(false),
- },
- },
- },
- // Boolean True
- {
- in: `count:42 inner { host: "example.com" connected: True }`,
- out: &MyMessage{
- Count: Int32(42),
- Inner: &InnerMessage{
- Host: String("example.com"),
- Connected: Bool(true),
- },
- },
- },
-
- // Extension
- buildExtStructTest(`count: 42 [testdata.Ext.more]:`),
- buildExtStructTest(`count: 42 [testdata.Ext.more] {data:"Hello, world!"}`),
- buildExtDataTest(`count: 42 [testdata.Ext.text]:"Hello, world!" [testdata.Ext.number]:1729`),
- buildExtRepStringTest(`count: 42 [testdata.greeting]:"bula" [testdata.greeting]:"hola"`),
-
- // Big all-in-one
- {
- in: "count:42 # Meaning\n" +
- `name:"Dave" ` +
- `quote:"\"I didn't want to go.\"" ` +
- `pet:"bunny" ` +
- `pet:"kitty" ` +
- `pet:"horsey" ` +
- `inner:<` +
- ` host:"footrest.syd" ` +
- ` port:7001 ` +
- ` connected:true ` +
- `> ` +
- `others:<` +
- ` key:3735928559 ` +
- ` value:"\x01A\a\f" ` +
- `> ` +
- `others:<` +
- " weight:58.9 # Atomic weight of Co\n" +
- ` inner:<` +
- ` host:"lesha.mtv" ` +
- ` port:8002 ` +
- ` >` +
- `>`,
- out: &MyMessage{
- Count: Int32(42),
- Name: String("Dave"),
- Quote: String(`"I didn't want to go."`),
- Pet: []string{"bunny", "kitty", "horsey"},
- Inner: &InnerMessage{
- Host: String("footrest.syd"),
- Port: Int32(7001),
- Connected: Bool(true),
- },
- Others: []*OtherMessage{
- {
- Key: Int64(3735928559),
- Value: []byte{0x1, 'A', '\a', '\f'},
- },
- {
- Weight: Float32(58.9),
- Inner: &InnerMessage{
- Host: String("lesha.mtv"),
- Port: Int32(8002),
- },
- },
- },
- },
- },
-}
-
-func TestUnmarshalText(t *testing.T) {
- for i, test := range unMarshalTextTests {
- pb := new(MyMessage)
- err := UnmarshalText(test.in, pb)
- if test.err == "" {
- // We don't expect failure.
- if err != nil {
- t.Errorf("Test %d: Unexpected error: %v", i, err)
- } else if !reflect.DeepEqual(pb, test.out) {
- t.Errorf("Test %d: Incorrect populated \nHave: %v\nWant: %v",
- i, pb, test.out)
- }
- } else {
- // We do expect failure.
- if err == nil {
- t.Errorf("Test %d: Didn't get expected error: %v", i, test.err)
- } else if err.Error() != test.err {
- t.Errorf("Test %d: Incorrect error.\nHave: %v\nWant: %v",
- i, err.Error(), test.err)
- } else if _, ok := err.(*RequiredNotSetError); ok && test.out != nil && !reflect.DeepEqual(pb, test.out) {
- t.Errorf("Test %d: Incorrect populated \nHave: %v\nWant: %v",
- i, pb, test.out)
- }
- }
- }
-}
-
-func TestUnmarshalTextCustomMessage(t *testing.T) {
- msg := &textMessage{}
- if err := UnmarshalText("custom", msg); err != nil {
- t.Errorf("Unexpected error from custom unmarshal: %v", err)
- }
- if UnmarshalText("not custom", msg) == nil {
- t.Errorf("Didn't get expected error from custom unmarshal")
- }
-}
-
-// Regression test; this caused a panic.
-func TestRepeatedEnum(t *testing.T) {
- pb := new(RepeatedEnum)
- if err := UnmarshalText("color: RED", pb); err != nil {
- t.Fatal(err)
- }
- exp := &RepeatedEnum{
- Color: []RepeatedEnum_Color{RepeatedEnum_RED},
- }
- if !Equal(pb, exp) {
- t.Errorf("Incorrect populated \nHave: %v\nWant: %v", pb, exp)
- }
-}
-
-func TestProto3TextParsing(t *testing.T) {
- m := new(proto3pb.Message)
- const in = `name: "Wallace" true_scotsman: true`
- want := &proto3pb.Message{
- Name: "Wallace",
- TrueScotsman: true,
- }
- if err := UnmarshalText(in, m); err != nil {
- t.Fatal(err)
- }
- if !Equal(m, want) {
- t.Errorf("\n got %v\nwant %v", m, want)
- }
-}
-
-func TestMapParsing(t *testing.T) {
- m := new(MessageWithMap)
- const in = `name_mapping: name_mapping:` +
- `msg_mapping:,>` + // separating commas are okay
- `msg_mapping>` + // no colon after "value"
- `msg_mapping:>` + // omitted key
- `msg_mapping:` + // omitted value
- `byte_mapping:` +
- `byte_mapping:<>` // omitted key and value
- want := &MessageWithMap{
- NameMapping: map[int32]string{
- 1: "Beatles",
- 1234: "Feist",
- },
- MsgMapping: map[int64]*FloatingPoint{
- -4: {F: Float64(2.0)},
- -2: {F: Float64(4.0)},
- 0: {F: Float64(5.0)},
- 1: nil,
- },
- ByteMapping: map[bool][]byte{
- false: nil,
- true: []byte("so be it"),
- },
- }
- if err := UnmarshalText(in, m); err != nil {
- t.Fatal(err)
- }
- if !Equal(m, want) {
- t.Errorf("\n got %v\nwant %v", m, want)
- }
-}
-
-func TestOneofParsing(t *testing.T) {
- const in = `name:"Shrek"`
- m := new(Communique)
- want := &Communique{Union: &Communique_Name{"Shrek"}}
- if err := UnmarshalText(in, m); err != nil {
- t.Fatal(err)
- }
- if !Equal(m, want) {
- t.Errorf("\n got %v\nwant %v", m, want)
- }
-}
-
-var benchInput string
-
-func init() {
- benchInput = "count: 4\n"
- for i := 0; i < 1000; i++ {
- benchInput += "pet: \"fido\"\n"
- }
-
- // Check it is valid input.
- pb := new(MyMessage)
- err := UnmarshalText(benchInput, pb)
- if err != nil {
- panic("Bad benchmark input: " + err.Error())
- }
-}
-
-func BenchmarkUnmarshalText(b *testing.B) {
- pb := new(MyMessage)
- for i := 0; i < b.N; i++ {
- UnmarshalText(benchInput, pb)
- }
- b.SetBytes(int64(len(benchInput)))
-}
diff --git a/vendor/src/github.com/golang/protobuf/proto/text_test.go b/vendor/src/github.com/golang/protobuf/proto/text_test.go
deleted file mode 100644
index 3eabaca..0000000
--- a/vendor/src/github.com/golang/protobuf/proto/text_test.go
+++ /dev/null
@@ -1,474 +0,0 @@
-// Go support for Protocol Buffers - Google's data interchange format
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto_test
-
-import (
- "bytes"
- "errors"
- "io/ioutil"
- "math"
- "strings"
- "testing"
-
- "github.com/golang/protobuf/proto"
-
- proto3pb "github.com/golang/protobuf/proto/proto3_proto"
- pb "github.com/golang/protobuf/proto/testdata"
-)
-
-// textMessage implements the methods that allow it to marshal and unmarshal
-// itself as text.
-type textMessage struct {
-}
-
-func (*textMessage) MarshalText() ([]byte, error) {
- return []byte("custom"), nil
-}
-
-func (*textMessage) UnmarshalText(bytes []byte) error {
- if string(bytes) != "custom" {
- return errors.New("expected 'custom'")
- }
- return nil
-}
-
-func (*textMessage) Reset() {}
-func (*textMessage) String() string { return "" }
-func (*textMessage) ProtoMessage() {}
-
-func newTestMessage() *pb.MyMessage {
- msg := &pb.MyMessage{
- Count: proto.Int32(42),
- Name: proto.String("Dave"),
- Quote: proto.String(`"I didn't want to go."`),
- Pet: []string{"bunny", "kitty", "horsey"},
- Inner: &pb.InnerMessage{
- Host: proto.String("footrest.syd"),
- Port: proto.Int32(7001),
- Connected: proto.Bool(true),
- },
- Others: []*pb.OtherMessage{
- {
- Key: proto.Int64(0xdeadbeef),
- Value: []byte{1, 65, 7, 12},
- },
- {
- Weight: proto.Float32(6.022),
- Inner: &pb.InnerMessage{
- Host: proto.String("lesha.mtv"),
- Port: proto.Int32(8002),
- },
- },
- },
- Bikeshed: pb.MyMessage_BLUE.Enum(),
- Somegroup: &pb.MyMessage_SomeGroup{
- GroupField: proto.Int32(8),
- },
- // One normally wouldn't do this.
- // This is an undeclared tag 13, as a varint (wire type 0) with value 4.
- XXX_unrecognized: []byte{13<<3 | 0, 4},
- }
- ext := &pb.Ext{
- Data: proto.String("Big gobs for big rats"),
- }
- if err := proto.SetExtension(msg, pb.E_Ext_More, ext); err != nil {
- panic(err)
- }
- greetings := []string{"adg", "easy", "cow"}
- if err := proto.SetExtension(msg, pb.E_Greeting, greetings); err != nil {
- panic(err)
- }
-
- // Add an unknown extension. We marshal a pb.Ext, and fake the ID.
- b, err := proto.Marshal(&pb.Ext{Data: proto.String("3G skiing")})
- if err != nil {
- panic(err)
- }
- b = append(proto.EncodeVarint(201<<3|proto.WireBytes), b...)
- proto.SetRawExtension(msg, 201, b)
-
- // Extensions can be plain fields, too, so let's test that.
- b = append(proto.EncodeVarint(202<<3|proto.WireVarint), 19)
- proto.SetRawExtension(msg, 202, b)
-
- return msg
-}
-
-const text = `count: 42
-name: "Dave"
-quote: "\"I didn't want to go.\""
-pet: "bunny"
-pet: "kitty"
-pet: "horsey"
-inner: <
- host: "footrest.syd"
- port: 7001
- connected: true
->
-others: <
- key: 3735928559
- value: "\001A\007\014"
->
-others: <
- weight: 6.022
- inner: <
- host: "lesha.mtv"
- port: 8002
- >
->
-bikeshed: BLUE
-SomeGroup {
- group_field: 8
-}
-/* 2 unknown bytes */
-13: 4
-[testdata.Ext.more]: <
- data: "Big gobs for big rats"
->
-[testdata.greeting]: "adg"
-[testdata.greeting]: "easy"
-[testdata.greeting]: "cow"
-/* 13 unknown bytes */
-201: "\t3G skiing"
-/* 3 unknown bytes */
-202: 19
-`
-
-func TestMarshalText(t *testing.T) {
- buf := new(bytes.Buffer)
- if err := proto.MarshalText(buf, newTestMessage()); err != nil {
- t.Fatalf("proto.MarshalText: %v", err)
- }
- s := buf.String()
- if s != text {
- t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", s, text)
- }
-}
-
-func TestMarshalTextCustomMessage(t *testing.T) {
- buf := new(bytes.Buffer)
- if err := proto.MarshalText(buf, &textMessage{}); err != nil {
- t.Fatalf("proto.MarshalText: %v", err)
- }
- s := buf.String()
- if s != "custom" {
- t.Errorf("Got %q, expected %q", s, "custom")
- }
-}
-func TestMarshalTextNil(t *testing.T) {
- want := ""
- tests := []proto.Message{nil, (*pb.MyMessage)(nil)}
- for i, test := range tests {
- buf := new(bytes.Buffer)
- if err := proto.MarshalText(buf, test); err != nil {
- t.Fatal(err)
- }
- if got := buf.String(); got != want {
- t.Errorf("%d: got %q want %q", i, got, want)
- }
- }
-}
-
-func TestMarshalTextUnknownEnum(t *testing.T) {
- // The Color enum only specifies values 0-2.
- m := &pb.MyMessage{Bikeshed: pb.MyMessage_Color(3).Enum()}
- got := m.String()
- const want = `bikeshed:3 `
- if got != want {
- t.Errorf("\n got %q\nwant %q", got, want)
- }
-}
-
-func TestTextOneof(t *testing.T) {
- tests := []struct {
- m proto.Message
- want string
- }{
- // zero message
- {&pb.Communique{}, ``},
- // scalar field
- {&pb.Communique{Union: &pb.Communique_Number{4}}, `number:4`},
- // message field
- {&pb.Communique{Union: &pb.Communique_Msg{
- &pb.Strings{StringField: proto.String("why hello!")},
- }}, `msg:`},
- // bad oneof (should not panic)
- {&pb.Communique{Union: &pb.Communique_Msg{nil}}, `msg:/* nil */`},
- }
- for _, test := range tests {
- got := strings.TrimSpace(test.m.String())
- if got != test.want {
- t.Errorf("\n got %s\nwant %s", got, test.want)
- }
- }
-}
-
-func BenchmarkMarshalTextBuffered(b *testing.B) {
- buf := new(bytes.Buffer)
- m := newTestMessage()
- for i := 0; i < b.N; i++ {
- buf.Reset()
- proto.MarshalText(buf, m)
- }
-}
-
-func BenchmarkMarshalTextUnbuffered(b *testing.B) {
- w := ioutil.Discard
- m := newTestMessage()
- for i := 0; i < b.N; i++ {
- proto.MarshalText(w, m)
- }
-}
-
-func compact(src string) string {
- // s/[ \n]+/ /g; s/ $//;
- dst := make([]byte, len(src))
- space, comment := false, false
- j := 0
- for i := 0; i < len(src); i++ {
- if strings.HasPrefix(src[i:], "/*") {
- comment = true
- i++
- continue
- }
- if comment && strings.HasPrefix(src[i:], "*/") {
- comment = false
- i++
- continue
- }
- if comment {
- continue
- }
- c := src[i]
- if c == ' ' || c == '\n' {
- space = true
- continue
- }
- if j > 0 && (dst[j-1] == ':' || dst[j-1] == '<' || dst[j-1] == '{') {
- space = false
- }
- if c == '{' {
- space = false
- }
- if space {
- dst[j] = ' '
- j++
- space = false
- }
- dst[j] = c
- j++
- }
- if space {
- dst[j] = ' '
- j++
- }
- return string(dst[0:j])
-}
-
-var compactText = compact(text)
-
-func TestCompactText(t *testing.T) {
- s := proto.CompactTextString(newTestMessage())
- if s != compactText {
- t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v\n===\n", s, compactText)
- }
-}
-
-func TestStringEscaping(t *testing.T) {
- testCases := []struct {
- in *pb.Strings
- out string
- }{
- {
- // Test data from C++ test (TextFormatTest.StringEscape).
- // Single divergence: we don't escape apostrophes.
- &pb.Strings{StringField: proto.String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and multiple spaces")},
- "string_field: \"\\\"A string with ' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and multiple spaces\"\n",
- },
- {
- // Test data from the same C++ test.
- &pb.Strings{StringField: proto.String("\350\260\267\346\255\214")},
- "string_field: \"\\350\\260\\267\\346\\255\\214\"\n",
- },
- {
- // Some UTF-8.
- &pb.Strings{StringField: proto.String("\x00\x01\xff\x81")},
- `string_field: "\000\001\377\201"` + "\n",
- },
- }
-
- for i, tc := range testCases {
- var buf bytes.Buffer
- if err := proto.MarshalText(&buf, tc.in); err != nil {
- t.Errorf("proto.MarsalText: %v", err)
- continue
- }
- s := buf.String()
- if s != tc.out {
- t.Errorf("#%d: Got:\n%s\nExpected:\n%s\n", i, s, tc.out)
- continue
- }
-
- // Check round-trip.
- pb := new(pb.Strings)
- if err := proto.UnmarshalText(s, pb); err != nil {
- t.Errorf("#%d: UnmarshalText: %v", i, err)
- continue
- }
- if !proto.Equal(pb, tc.in) {
- t.Errorf("#%d: Round-trip failed:\nstart: %v\n end: %v", i, tc.in, pb)
- }
- }
-}
-
-// A limitedWriter accepts some output before it fails.
-// This is a proxy for something like a nearly-full or imminently-failing disk,
-// or a network connection that is about to die.
-type limitedWriter struct {
- b bytes.Buffer
- limit int
-}
-
-var outOfSpace = errors.New("proto: insufficient space")
-
-func (w *limitedWriter) Write(p []byte) (n int, err error) {
- var avail = w.limit - w.b.Len()
- if avail <= 0 {
- return 0, outOfSpace
- }
- if len(p) <= avail {
- return w.b.Write(p)
- }
- n, _ = w.b.Write(p[:avail])
- return n, outOfSpace
-}
-
-func TestMarshalTextFailing(t *testing.T) {
- // Try lots of different sizes to exercise more error code-paths.
- for lim := 0; lim < len(text); lim++ {
- buf := new(limitedWriter)
- buf.limit = lim
- err := proto.MarshalText(buf, newTestMessage())
- // We expect a certain error, but also some partial results in the buffer.
- if err != outOfSpace {
- t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", err, outOfSpace)
- }
- s := buf.b.String()
- x := text[:buf.limit]
- if s != x {
- t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", s, x)
- }
- }
-}
-
-func TestFloats(t *testing.T) {
- tests := []struct {
- f float64
- want string
- }{
- {0, "0"},
- {4.7, "4.7"},
- {math.Inf(1), "inf"},
- {math.Inf(-1), "-inf"},
- {math.NaN(), "nan"},
- }
- for _, test := range tests {
- msg := &pb.FloatingPoint{F: &test.f}
- got := strings.TrimSpace(msg.String())
- want := `f:` + test.want
- if got != want {
- t.Errorf("f=%f: got %q, want %q", test.f, got, want)
- }
- }
-}
-
-func TestRepeatedNilText(t *testing.T) {
- m := &pb.MessageList{
- Message: []*pb.MessageList_Message{
- nil,
- &pb.MessageList_Message{
- Name: proto.String("Horse"),
- },
- nil,
- },
- }
- want := `Message
-Message {
- name: "Horse"
-}
-Message
-`
- if s := proto.MarshalTextString(m); s != want {
- t.Errorf(" got: %s\nwant: %s", s, want)
- }
-}
-
-func TestProto3Text(t *testing.T) {
- tests := []struct {
- m proto.Message
- want string
- }{
- // zero message
- {&proto3pb.Message{}, ``},
- // zero message except for an empty byte slice
- {&proto3pb.Message{Data: []byte{}}, ``},
- // trivial case
- {&proto3pb.Message{Name: "Rob", HeightInCm: 175}, `name:"Rob" height_in_cm:175`},
- // empty map
- {&pb.MessageWithMap{}, ``},
- // non-empty map; map format is the same as a repeated struct,
- // and they are sorted by key (numerically for numeric keys).
- {
- &pb.MessageWithMap{NameMapping: map[int32]string{
- -1: "Negatory",
- 7: "Lucky",
- 1234: "Feist",
- 6345789: "Otis",
- }},
- `name_mapping: ` +
- `name_mapping: ` +
- `name_mapping: ` +
- `name_mapping:`,
- },
- // map with nil value; not well-defined, but we shouldn't crash
- {
- &pb.MessageWithMap{MsgMapping: map[int64]*pb.FloatingPoint{7: nil}},
- `msg_mapping:`,
- },
- }
- for _, test := range tests {
- got := strings.TrimSpace(test.m.String())
- if got != test.want {
- t.Errorf("\n got %s\nwant %s", got, test.want)
- }
- }
-}
diff --git a/vendor/src/github.com/golang/snappy/AUTHORS b/vendor/src/github.com/golang/snappy/AUTHORS
deleted file mode 100644
index bcfa195..0000000
--- a/vendor/src/github.com/golang/snappy/AUTHORS
+++ /dev/null
@@ -1,15 +0,0 @@
-# This is the official list of Snappy-Go authors for copyright purposes.
-# This file is distinct from the CONTRIBUTORS files.
-# See the latter for an explanation.
-
-# Names should be added to this file as
-# Name or Organization
-# The email address is not required for organizations.
-
-# Please keep the list sorted.
-
-Damian Gryski
-Google Inc.
-Jan Mercl <0xjnml@gmail.com>
-Rodolfo Carvalho
-Sebastien Binet
diff --git a/vendor/src/github.com/golang/snappy/CONTRIBUTORS b/vendor/src/github.com/golang/snappy/CONTRIBUTORS
deleted file mode 100644
index 931ae31..0000000
--- a/vendor/src/github.com/golang/snappy/CONTRIBUTORS
+++ /dev/null
@@ -1,37 +0,0 @@
-# This is the official list of people who can contribute
-# (and typically have contributed) code to the Snappy-Go repository.
-# The AUTHORS file lists the copyright holders; this file
-# lists people. For example, Google employees are listed here
-# but not in AUTHORS, because Google holds the copyright.
-#
-# The submission process automatically checks to make sure
-# that people submitting code are listed in this file (by email address).
-#
-# Names should be added to this file only after verifying that
-# the individual or the individual's organization has agreed to
-# the appropriate Contributor License Agreement, found here:
-#
-# http://code.google.com/legal/individual-cla-v1.0.html
-# http://code.google.com/legal/corporate-cla-v1.0.html
-#
-# The agreement for individuals can be filled out on the web.
-#
-# When adding J Random Contributor's name to this file,
-# either J's name or J's organization's name should be
-# added to the AUTHORS file, depending on whether the
-# individual or corporate CLA was used.
-
-# Names should be added to this file like so:
-# Name
-
-# Please keep the list sorted.
-
-Damian Gryski
-Jan Mercl <0xjnml@gmail.com>
-Kai Backman
-Marc-Antoine Ruel
-Nigel Tao
-Rob Pike
-Rodolfo Carvalho
-Russ Cox
-Sebastien Binet
diff --git a/vendor/src/github.com/golang/snappy/LICENSE b/vendor/src/github.com/golang/snappy/LICENSE
deleted file mode 100644
index 6050c10..0000000
--- a/vendor/src/github.com/golang/snappy/LICENSE
+++ /dev/null
@@ -1,27 +0,0 @@
-Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/src/github.com/golang/snappy/README b/vendor/src/github.com/golang/snappy/README
deleted file mode 100644
index cea1287..0000000
--- a/vendor/src/github.com/golang/snappy/README
+++ /dev/null
@@ -1,107 +0,0 @@
-The Snappy compression format in the Go programming language.
-
-To download and install from source:
-$ go get github.com/golang/snappy
-
-Unless otherwise noted, the Snappy-Go source files are distributed
-under the BSD-style license found in the LICENSE file.
-
-
-
-Benchmarks.
-
-The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten
-or so files, the same set used by the C++ Snappy code (github.com/google/snappy
-and note the "google", not "golang"). On an "Intel(R) Core(TM) i7-3770 CPU @
-3.40GHz", Go's GOARCH=amd64 numbers as of 2016-05-29:
-
-"go test -test.bench=."
-
-_UFlat0-8 2.19GB/s ± 0% html
-_UFlat1-8 1.41GB/s ± 0% urls
-_UFlat2-8 23.5GB/s ± 2% jpg
-_UFlat3-8 1.91GB/s ± 0% jpg_200
-_UFlat4-8 14.0GB/s ± 1% pdf
-_UFlat5-8 1.97GB/s ± 0% html4
-_UFlat6-8 814MB/s ± 0% txt1
-_UFlat7-8 785MB/s ± 0% txt2
-_UFlat8-8 857MB/s ± 0% txt3
-_UFlat9-8 719MB/s ± 1% txt4
-_UFlat10-8 2.84GB/s ± 0% pb
-_UFlat11-8 1.05GB/s ± 0% gaviota
-
-_ZFlat0-8 1.04GB/s ± 0% html
-_ZFlat1-8 534MB/s ± 0% urls
-_ZFlat2-8 15.7GB/s ± 1% jpg
-_ZFlat3-8 740MB/s ± 3% jpg_200
-_ZFlat4-8 9.20GB/s ± 1% pdf
-_ZFlat5-8 991MB/s ± 0% html4
-_ZFlat6-8 379MB/s ± 0% txt1
-_ZFlat7-8 352MB/s ± 0% txt2
-_ZFlat8-8 396MB/s ± 1% txt3
-_ZFlat9-8 327MB/s ± 1% txt4
-_ZFlat10-8 1.33GB/s ± 1% pb
-_ZFlat11-8 605MB/s ± 1% gaviota
-
-
-
-"go test -test.bench=. -tags=noasm"
-
-_UFlat0-8 621MB/s ± 2% html
-_UFlat1-8 494MB/s ± 1% urls
-_UFlat2-8 23.2GB/s ± 1% jpg
-_UFlat3-8 1.12GB/s ± 1% jpg_200
-_UFlat4-8 4.35GB/s ± 1% pdf
-_UFlat5-8 609MB/s ± 0% html4
-_UFlat6-8 296MB/s ± 0% txt1
-_UFlat7-8 288MB/s ± 0% txt2
-_UFlat8-8 309MB/s ± 1% txt3
-_UFlat9-8 280MB/s ± 1% txt4
-_UFlat10-8 753MB/s ± 0% pb
-_UFlat11-8 400MB/s ± 0% gaviota
-
-_ZFlat0-8 409MB/s ± 1% html
-_ZFlat1-8 250MB/s ± 1% urls
-_ZFlat2-8 12.3GB/s ± 1% jpg
-_ZFlat3-8 132MB/s ± 0% jpg_200
-_ZFlat4-8 2.92GB/s ± 0% pdf
-_ZFlat5-8 405MB/s ± 1% html4
-_ZFlat6-8 179MB/s ± 1% txt1
-_ZFlat7-8 170MB/s ± 1% txt2
-_ZFlat8-8 189MB/s ± 1% txt3
-_ZFlat9-8 164MB/s ± 1% txt4
-_ZFlat10-8 479MB/s ± 1% pb
-_ZFlat11-8 270MB/s ± 1% gaviota
-
-
-
-For comparison (Go's encoded output is byte-for-byte identical to C++'s), here
-are the numbers from C++ Snappy's
-
-make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log
-
-BM_UFlat/0 2.4GB/s html
-BM_UFlat/1 1.4GB/s urls
-BM_UFlat/2 21.8GB/s jpg
-BM_UFlat/3 1.5GB/s jpg_200
-BM_UFlat/4 13.3GB/s pdf
-BM_UFlat/5 2.1GB/s html4
-BM_UFlat/6 1.0GB/s txt1
-BM_UFlat/7 959.4MB/s txt2
-BM_UFlat/8 1.0GB/s txt3
-BM_UFlat/9 864.5MB/s txt4
-BM_UFlat/10 2.9GB/s pb
-BM_UFlat/11 1.2GB/s gaviota
-
-BM_ZFlat/0 944.3MB/s html (22.31 %)
-BM_ZFlat/1 501.6MB/s urls (47.78 %)
-BM_ZFlat/2 14.3GB/s jpg (99.95 %)
-BM_ZFlat/3 538.3MB/s jpg_200 (73.00 %)
-BM_ZFlat/4 8.3GB/s pdf (83.30 %)
-BM_ZFlat/5 903.5MB/s html4 (22.52 %)
-BM_ZFlat/6 336.0MB/s txt1 (57.88 %)
-BM_ZFlat/7 312.3MB/s txt2 (61.91 %)
-BM_ZFlat/8 353.1MB/s txt3 (54.99 %)
-BM_ZFlat/9 289.9MB/s txt4 (66.26 %)
-BM_ZFlat/10 1.2GB/s pb (19.68 %)
-BM_ZFlat/11 527.4MB/s gaviota (37.72 %)
diff --git a/vendor/src/github.com/golang/snappy/cmd/snappytool/main.cpp b/vendor/src/github.com/golang/snappy/cmd/snappytool/main.cpp
deleted file mode 100644
index fc31f51..0000000
--- a/vendor/src/github.com/golang/snappy/cmd/snappytool/main.cpp
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
-To build the snappytool binary:
-g++ main.cpp /usr/lib/libsnappy.a -o snappytool
-or, if you have built the C++ snappy library from source:
-g++ main.cpp /path/to/your/snappy/.libs/libsnappy.a -o snappytool
-after running "make" from your snappy checkout directory.
-*/
-
-#include
-#include
-#include
-#include
-
-#include "snappy.h"
-
-#define N 1000000
-
-char dst[N];
-char src[N];
-
-int main(int argc, char** argv) {
- // Parse args.
- if (argc != 2) {
- fprintf(stderr, "exactly one of -d or -e must be given\n");
- return 1;
- }
- bool decode = strcmp(argv[1], "-d") == 0;
- bool encode = strcmp(argv[1], "-e") == 0;
- if (decode == encode) {
- fprintf(stderr, "exactly one of -d or -e must be given\n");
- return 1;
- }
-
- // Read all of stdin into src[:s].
- size_t s = 0;
- while (1) {
- if (s == N) {
- fprintf(stderr, "input too large\n");
- return 1;
- }
- ssize_t n = read(0, src+s, N-s);
- if (n == 0) {
- break;
- }
- if (n < 0) {
- fprintf(stderr, "read error: %s\n", strerror(errno));
- // TODO: handle EAGAIN, EINTR?
- return 1;
- }
- s += n;
- }
-
- // Encode or decode src[:s] to dst[:d], and write to stdout.
- size_t d = 0;
- if (encode) {
- if (N < snappy::MaxCompressedLength(s)) {
- fprintf(stderr, "input too large after encoding\n");
- return 1;
- }
- snappy::RawCompress(src, s, dst, &d);
- } else {
- if (!snappy::GetUncompressedLength(src, s, &d)) {
- fprintf(stderr, "could not get uncompressed length\n");
- return 1;
- }
- if (N < d) {
- fprintf(stderr, "input too large after decoding\n");
- return 1;
- }
- if (!snappy::RawUncompress(src, s, dst)) {
- fprintf(stderr, "input was not valid Snappy-compressed data\n");
- return 1;
- }
- }
- write(1, dst, d);
- return 0;
-}
diff --git a/vendor/src/github.com/golang/snappy/decode.go b/vendor/src/github.com/golang/snappy/decode.go
deleted file mode 100644
index 72efb03..0000000
--- a/vendor/src/github.com/golang/snappy/decode.go
+++ /dev/null
@@ -1,237 +0,0 @@
-// Copyright 2011 The Snappy-Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package snappy
-
-import (
- "encoding/binary"
- "errors"
- "io"
-)
-
-var (
- // ErrCorrupt reports that the input is invalid.
- ErrCorrupt = errors.New("snappy: corrupt input")
- // ErrTooLarge reports that the uncompressed length is too large.
- ErrTooLarge = errors.New("snappy: decoded block is too large")
- // ErrUnsupported reports that the input isn't supported.
- ErrUnsupported = errors.New("snappy: unsupported input")
-
- errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length")
-)
-
-// DecodedLen returns the length of the decoded block.
-func DecodedLen(src []byte) (int, error) {
- v, _, err := decodedLen(src)
- return v, err
-}
-
-// decodedLen returns the length of the decoded block and the number of bytes
-// that the length header occupied.
-func decodedLen(src []byte) (blockLen, headerLen int, err error) {
- v, n := binary.Uvarint(src)
- if n <= 0 || v > 0xffffffff {
- return 0, 0, ErrCorrupt
- }
-
- const wordSize = 32 << (^uint(0) >> 32 & 1)
- if wordSize == 32 && v > 0x7fffffff {
- return 0, 0, ErrTooLarge
- }
- return int(v), n, nil
-}
-
-const (
- decodeErrCodeCorrupt = 1
- decodeErrCodeUnsupportedLiteralLength = 2
-)
-
-// Decode returns the decoded form of src. The returned slice may be a sub-
-// slice of dst if dst was large enough to hold the entire decoded block.
-// Otherwise, a newly allocated slice will be returned.
-//
-// The dst and src must not overlap. It is valid to pass a nil dst.
-func Decode(dst, src []byte) ([]byte, error) {
- dLen, s, err := decodedLen(src)
- if err != nil {
- return nil, err
- }
- if dLen <= len(dst) {
- dst = dst[:dLen]
- } else {
- dst = make([]byte, dLen)
- }
- switch decode(dst, src[s:]) {
- case 0:
- return dst, nil
- case decodeErrCodeUnsupportedLiteralLength:
- return nil, errUnsupportedLiteralLength
- }
- return nil, ErrCorrupt
-}
-
-// NewReader returns a new Reader that decompresses from r, using the framing
-// format described at
-// https://github.com/google/snappy/blob/master/framing_format.txt
-func NewReader(r io.Reader) *Reader {
- return &Reader{
- r: r,
- decoded: make([]byte, maxBlockSize),
- buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize),
- }
-}
-
-// Reader is an io.Reader that can read Snappy-compressed bytes.
-type Reader struct {
- r io.Reader
- err error
- decoded []byte
- buf []byte
- // decoded[i:j] contains decoded bytes that have not yet been passed on.
- i, j int
- readHeader bool
-}
-
-// Reset discards any buffered data, resets all state, and switches the Snappy
-// reader to read from r. This permits reusing a Reader rather than allocating
-// a new one.
-func (r *Reader) Reset(reader io.Reader) {
- r.r = reader
- r.err = nil
- r.i = 0
- r.j = 0
- r.readHeader = false
-}
-
-func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) {
- if _, r.err = io.ReadFull(r.r, p); r.err != nil {
- if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) {
- r.err = ErrCorrupt
- }
- return false
- }
- return true
-}
-
-// Read satisfies the io.Reader interface.
-func (r *Reader) Read(p []byte) (int, error) {
- if r.err != nil {
- return 0, r.err
- }
- for {
- if r.i < r.j {
- n := copy(p, r.decoded[r.i:r.j])
- r.i += n
- return n, nil
- }
- if !r.readFull(r.buf[:4], true) {
- return 0, r.err
- }
- chunkType := r.buf[0]
- if !r.readHeader {
- if chunkType != chunkTypeStreamIdentifier {
- r.err = ErrCorrupt
- return 0, r.err
- }
- r.readHeader = true
- }
- chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
- if chunkLen > len(r.buf) {
- r.err = ErrUnsupported
- return 0, r.err
- }
-
- // The chunk types are specified at
- // https://github.com/google/snappy/blob/master/framing_format.txt
- switch chunkType {
- case chunkTypeCompressedData:
- // Section 4.2. Compressed data (chunk type 0x00).
- if chunkLen < checksumSize {
- r.err = ErrCorrupt
- return 0, r.err
- }
- buf := r.buf[:chunkLen]
- if !r.readFull(buf, false) {
- return 0, r.err
- }
- checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
- buf = buf[checksumSize:]
-
- n, err := DecodedLen(buf)
- if err != nil {
- r.err = err
- return 0, r.err
- }
- if n > len(r.decoded) {
- r.err = ErrCorrupt
- return 0, r.err
- }
- if _, err := Decode(r.decoded, buf); err != nil {
- r.err = err
- return 0, r.err
- }
- if crc(r.decoded[:n]) != checksum {
- r.err = ErrCorrupt
- return 0, r.err
- }
- r.i, r.j = 0, n
- continue
-
- case chunkTypeUncompressedData:
- // Section 4.3. Uncompressed data (chunk type 0x01).
- if chunkLen < checksumSize {
- r.err = ErrCorrupt
- return 0, r.err
- }
- buf := r.buf[:checksumSize]
- if !r.readFull(buf, false) {
- return 0, r.err
- }
- checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
- // Read directly into r.decoded instead of via r.buf.
- n := chunkLen - checksumSize
- if n > len(r.decoded) {
- r.err = ErrCorrupt
- return 0, r.err
- }
- if !r.readFull(r.decoded[:n], false) {
- return 0, r.err
- }
- if crc(r.decoded[:n]) != checksum {
- r.err = ErrCorrupt
- return 0, r.err
- }
- r.i, r.j = 0, n
- continue
-
- case chunkTypeStreamIdentifier:
- // Section 4.1. Stream identifier (chunk type 0xff).
- if chunkLen != len(magicBody) {
- r.err = ErrCorrupt
- return 0, r.err
- }
- if !r.readFull(r.buf[:len(magicBody)], false) {
- return 0, r.err
- }
- for i := 0; i < len(magicBody); i++ {
- if r.buf[i] != magicBody[i] {
- r.err = ErrCorrupt
- return 0, r.err
- }
- }
- continue
- }
-
- if chunkType <= 0x7f {
- // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
- r.err = ErrUnsupported
- return 0, r.err
- }
- // Section 4.4 Padding (chunk type 0xfe).
- // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
- if !r.readFull(r.buf[:chunkLen], false) {
- return 0, r.err
- }
- }
-}
diff --git a/vendor/src/github.com/golang/snappy/decode_amd64.go b/vendor/src/github.com/golang/snappy/decode_amd64.go
deleted file mode 100644
index fcd192b..0000000
--- a/vendor/src/github.com/golang/snappy/decode_amd64.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2016 The Snappy-Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !appengine
-// +build gc
-// +build !noasm
-
-package snappy
-
-// decode has the same semantics as in decode_other.go.
-//
-//go:noescape
-func decode(dst, src []byte) int
diff --git a/vendor/src/github.com/golang/snappy/decode_amd64.s b/vendor/src/github.com/golang/snappy/decode_amd64.s
deleted file mode 100644
index e6179f6..0000000
--- a/vendor/src/github.com/golang/snappy/decode_amd64.s
+++ /dev/null
@@ -1,490 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !appengine
-// +build gc
-// +build !noasm
-
-#include "textflag.h"
-
-// The asm code generally follows the pure Go code in decode_other.go, except
-// where marked with a "!!!".
-
-// func decode(dst, src []byte) int
-//
-// All local variables fit into registers. The non-zero stack size is only to
-// spill registers and push args when issuing a CALL. The register allocation:
-// - AX scratch
-// - BX scratch
-// - CX length or x
-// - DX offset
-// - SI &src[s]
-// - DI &dst[d]
-// + R8 dst_base
-// + R9 dst_len
-// + R10 dst_base + dst_len
-// + R11 src_base
-// + R12 src_len
-// + R13 src_base + src_len
-// - R14 used by doCopy
-// - R15 used by doCopy
-//
-// The registers R8-R13 (marked with a "+") are set at the start of the
-// function, and after a CALL returns, and are not otherwise modified.
-//
-// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI.
-// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI.
-TEXT ·decode(SB), NOSPLIT, $48-56
- // Initialize SI, DI and R8-R13.
- MOVQ dst_base+0(FP), R8
- MOVQ dst_len+8(FP), R9
- MOVQ R8, DI
- MOVQ R8, R10
- ADDQ R9, R10
- MOVQ src_base+24(FP), R11
- MOVQ src_len+32(FP), R12
- MOVQ R11, SI
- MOVQ R11, R13
- ADDQ R12, R13
-
-loop:
- // for s < len(src)
- CMPQ SI, R13
- JEQ end
-
- // CX = uint32(src[s])
- //
- // switch src[s] & 0x03
- MOVBLZX (SI), CX
- MOVL CX, BX
- ANDL $3, BX
- CMPL BX, $1
- JAE tagCopy
-
- // ----------------------------------------
- // The code below handles literal tags.
-
- // case tagLiteral:
- // x := uint32(src[s] >> 2)
- // switch
- SHRL $2, CX
- CMPL CX, $60
- JAE tagLit60Plus
-
- // case x < 60:
- // s++
- INCQ SI
-
-doLit:
- // This is the end of the inner "switch", when we have a literal tag.
- //
- // We assume that CX == x and x fits in a uint32, where x is the variable
- // used in the pure Go decode_other.go code.
-
- // length = int(x) + 1
- //
- // Unlike the pure Go code, we don't need to check if length <= 0 because
- // CX can hold 64 bits, so the increment cannot overflow.
- INCQ CX
-
- // Prepare to check if copying length bytes will run past the end of dst or
- // src.
- //
- // AX = len(dst) - d
- // BX = len(src) - s
- MOVQ R10, AX
- SUBQ DI, AX
- MOVQ R13, BX
- SUBQ SI, BX
-
- // !!! Try a faster technique for short (16 or fewer bytes) copies.
- //
- // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 {
- // goto callMemmove // Fall back on calling runtime·memmove.
- // }
- //
- // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s
- // against 21 instead of 16, because it cannot assume that all of its input
- // is contiguous in memory and so it needs to leave enough source bytes to
- // read the next tag without refilling buffers, but Go's Decode assumes
- // contiguousness (the src argument is a []byte).
- CMPQ CX, $16
- JGT callMemmove
- CMPQ AX, $16
- JLT callMemmove
- CMPQ BX, $16
- JLT callMemmove
-
- // !!! Implement the copy from src to dst as a 16-byte load and store.
- // (Decode's documentation says that dst and src must not overlap.)
- //
- // This always copies 16 bytes, instead of only length bytes, but that's
- // OK. If the input is a valid Snappy encoding then subsequent iterations
- // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a
- // non-nil error), so the overrun will be ignored.
- //
- // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
- // 16-byte loads and stores. This technique probably wouldn't be as
- // effective on architectures that are fussier about alignment.
- MOVOU 0(SI), X0
- MOVOU X0, 0(DI)
-
- // d += length
- // s += length
- ADDQ CX, DI
- ADDQ CX, SI
- JMP loop
-
-callMemmove:
- // if length > len(dst)-d || length > len(src)-s { etc }
- CMPQ CX, AX
- JGT errCorrupt
- CMPQ CX, BX
- JGT errCorrupt
-
- // copy(dst[d:], src[s:s+length])
- //
- // This means calling runtime·memmove(&dst[d], &src[s], length), so we push
- // DI, SI and CX as arguments. Coincidentally, we also need to spill those
- // three registers to the stack, to save local variables across the CALL.
- MOVQ DI, 0(SP)
- MOVQ SI, 8(SP)
- MOVQ CX, 16(SP)
- MOVQ DI, 24(SP)
- MOVQ SI, 32(SP)
- MOVQ CX, 40(SP)
- CALL runtime·memmove(SB)
-
- // Restore local variables: unspill registers from the stack and
- // re-calculate R8-R13.
- MOVQ 24(SP), DI
- MOVQ 32(SP), SI
- MOVQ 40(SP), CX
- MOVQ dst_base+0(FP), R8
- MOVQ dst_len+8(FP), R9
- MOVQ R8, R10
- ADDQ R9, R10
- MOVQ src_base+24(FP), R11
- MOVQ src_len+32(FP), R12
- MOVQ R11, R13
- ADDQ R12, R13
-
- // d += length
- // s += length
- ADDQ CX, DI
- ADDQ CX, SI
- JMP loop
-
-tagLit60Plus:
- // !!! This fragment does the
- //
- // s += x - 58; if uint(s) > uint(len(src)) { etc }
- //
- // checks. In the asm version, we code it once instead of once per switch case.
- ADDQ CX, SI
- SUBQ $58, SI
- MOVQ SI, BX
- SUBQ R11, BX
- CMPQ BX, R12
- JA errCorrupt
-
- // case x == 60:
- CMPL CX, $61
- JEQ tagLit61
- JA tagLit62Plus
-
- // x = uint32(src[s-1])
- MOVBLZX -1(SI), CX
- JMP doLit
-
-tagLit61:
- // case x == 61:
- // x = uint32(src[s-2]) | uint32(src[s-1])<<8
- MOVWLZX -2(SI), CX
- JMP doLit
-
-tagLit62Plus:
- CMPL CX, $62
- JA tagLit63
-
- // case x == 62:
- // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
- MOVWLZX -3(SI), CX
- MOVBLZX -1(SI), BX
- SHLL $16, BX
- ORL BX, CX
- JMP doLit
-
-tagLit63:
- // case x == 63:
- // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
- MOVL -4(SI), CX
- JMP doLit
-
-// The code above handles literal tags.
-// ----------------------------------------
-// The code below handles copy tags.
-
-tagCopy4:
- // case tagCopy4:
- // s += 5
- ADDQ $5, SI
-
- // if uint(s) > uint(len(src)) { etc }
- MOVQ SI, BX
- SUBQ R11, BX
- CMPQ BX, R12
- JA errCorrupt
-
- // length = 1 + int(src[s-5])>>2
- SHRQ $2, CX
- INCQ CX
-
- // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
- MOVLQZX -4(SI), DX
- JMP doCopy
-
-tagCopy2:
- // case tagCopy2:
- // s += 3
- ADDQ $3, SI
-
- // if uint(s) > uint(len(src)) { etc }
- MOVQ SI, BX
- SUBQ R11, BX
- CMPQ BX, R12
- JA errCorrupt
-
- // length = 1 + int(src[s-3])>>2
- SHRQ $2, CX
- INCQ CX
-
- // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
- MOVWQZX -2(SI), DX
- JMP doCopy
-
-tagCopy:
- // We have a copy tag. We assume that:
- // - BX == src[s] & 0x03
- // - CX == src[s]
- CMPQ BX, $2
- JEQ tagCopy2
- JA tagCopy4
-
- // case tagCopy1:
- // s += 2
- ADDQ $2, SI
-
- // if uint(s) > uint(len(src)) { etc }
- MOVQ SI, BX
- SUBQ R11, BX
- CMPQ BX, R12
- JA errCorrupt
-
- // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
- MOVQ CX, DX
- ANDQ $0xe0, DX
- SHLQ $3, DX
- MOVBQZX -1(SI), BX
- ORQ BX, DX
-
- // length = 4 + int(src[s-2])>>2&0x7
- SHRQ $2, CX
- ANDQ $7, CX
- ADDQ $4, CX
-
-doCopy:
- // This is the end of the outer "switch", when we have a copy tag.
- //
- // We assume that:
- // - CX == length && CX > 0
- // - DX == offset
-
- // if offset <= 0 { etc }
- CMPQ DX, $0
- JLE errCorrupt
-
- // if d < offset { etc }
- MOVQ DI, BX
- SUBQ R8, BX
- CMPQ BX, DX
- JLT errCorrupt
-
- // if length > len(dst)-d { etc }
- MOVQ R10, BX
- SUBQ DI, BX
- CMPQ CX, BX
- JGT errCorrupt
-
- // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length
- //
- // Set:
- // - R14 = len(dst)-d
- // - R15 = &dst[d-offset]
- MOVQ R10, R14
- SUBQ DI, R14
- MOVQ DI, R15
- SUBQ DX, R15
-
- // !!! Try a faster technique for short (16 or fewer bytes) forward copies.
- //
- // First, try using two 8-byte load/stores, similar to the doLit technique
- // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is
- // still OK if offset >= 8. Note that this has to be two 8-byte load/stores
- // and not one 16-byte load/store, and the first store has to be before the
- // second load, due to the overlap if offset is in the range [8, 16).
- //
- // if length > 16 || offset < 8 || len(dst)-d < 16 {
- // goto slowForwardCopy
- // }
- // copy 16 bytes
- // d += length
- CMPQ CX, $16
- JGT slowForwardCopy
- CMPQ DX, $8
- JLT slowForwardCopy
- CMPQ R14, $16
- JLT slowForwardCopy
- MOVQ 0(R15), AX
- MOVQ AX, 0(DI)
- MOVQ 8(R15), BX
- MOVQ BX, 8(DI)
- ADDQ CX, DI
- JMP loop
-
-slowForwardCopy:
- // !!! If the forward copy is longer than 16 bytes, or if offset < 8, we
- // can still try 8-byte load stores, provided we can overrun up to 10 extra
- // bytes. As above, the overrun will be fixed up by subsequent iterations
- // of the outermost loop.
- //
- // The C++ snappy code calls this technique IncrementalCopyFastPath. Its
- // commentary says:
- //
- // ----
- //
- // The main part of this loop is a simple copy of eight bytes at a time
- // until we've copied (at least) the requested amount of bytes. However,
- // if d and d-offset are less than eight bytes apart (indicating a
- // repeating pattern of length < 8), we first need to expand the pattern in
- // order to get the correct results. For instance, if the buffer looks like
- // this, with the eight-byte and patterns marked as
- // intervals:
- //
- // abxxxxxxxxxxxx
- // [------] d-offset
- // [------] d
- //
- // a single eight-byte copy from to will repeat the pattern
- // once, after which we can move two bytes without moving :
- //
- // ababxxxxxxxxxx
- // [------] d-offset
- // [------] d
- //
- // and repeat the exercise until the two no longer overlap.
- //
- // This allows us to do very well in the special case of one single byte
- // repeated many times, without taking a big hit for more general cases.
- //
- // The worst case of extra writing past the end of the match occurs when
- // offset == 1 and length == 1; the last copy will read from byte positions
- // [0..7] and write to [4..11], whereas it was only supposed to write to
- // position 1. Thus, ten excess bytes.
- //
- // ----
- //
- // That "10 byte overrun" worst case is confirmed by Go's
- // TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
- // and finishSlowForwardCopy algorithm.
- //
- // if length > len(dst)-d-10 {
- // goto verySlowForwardCopy
- // }
- SUBQ $10, R14
- CMPQ CX, R14
- JGT verySlowForwardCopy
-
-makeOffsetAtLeast8:
- // !!! As above, expand the pattern so that offset >= 8 and we can use
- // 8-byte load/stores.
- //
- // for offset < 8 {
- // copy 8 bytes from dst[d-offset:] to dst[d:]
- // length -= offset
- // d += offset
- // offset += offset
- // // The two previous lines together means that d-offset, and therefore
- // // R15, is unchanged.
- // }
- CMPQ DX, $8
- JGE fixUpSlowForwardCopy
- MOVQ (R15), BX
- MOVQ BX, (DI)
- SUBQ DX, CX
- ADDQ DX, DI
- ADDQ DX, DX
- JMP makeOffsetAtLeast8
-
-fixUpSlowForwardCopy:
- // !!! Add length (which might be negative now) to d (implied by DI being
- // &dst[d]) so that d ends up at the right place when we jump back to the
- // top of the loop. Before we do that, though, we save DI to AX so that, if
- // length is positive, copying the remaining length bytes will write to the
- // right place.
- MOVQ DI, AX
- ADDQ CX, DI
-
-finishSlowForwardCopy:
- // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative
- // length means that we overrun, but as above, that will be fixed up by
- // subsequent iterations of the outermost loop.
- CMPQ CX, $0
- JLE loop
- MOVQ (R15), BX
- MOVQ BX, (AX)
- ADDQ $8, R15
- ADDQ $8, AX
- SUBQ $8, CX
- JMP finishSlowForwardCopy
-
-verySlowForwardCopy:
- // verySlowForwardCopy is a simple implementation of forward copy. In C
- // parlance, this is a do/while loop instead of a while loop, since we know
- // that length > 0. In Go syntax:
- //
- // for {
- // dst[d] = dst[d - offset]
- // d++
- // length--
- // if length == 0 {
- // break
- // }
- // }
- MOVB (R15), BX
- MOVB BX, (DI)
- INCQ R15
- INCQ DI
- DECQ CX
- JNZ verySlowForwardCopy
- JMP loop
-
-// The code above handles copy tags.
-// ----------------------------------------
-
-end:
- // This is the end of the "for s < len(src)".
- //
- // if d != len(dst) { etc }
- CMPQ DI, R10
- JNE errCorrupt
-
- // return 0
- MOVQ $0, ret+48(FP)
- RET
-
-errCorrupt:
- // return decodeErrCodeCorrupt
- MOVQ $1, ret+48(FP)
- RET
diff --git a/vendor/src/github.com/golang/snappy/decode_other.go b/vendor/src/github.com/golang/snappy/decode_other.go
deleted file mode 100644
index 8c9f204..0000000
--- a/vendor/src/github.com/golang/snappy/decode_other.go
+++ /dev/null
@@ -1,101 +0,0 @@
-// Copyright 2016 The Snappy-Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !amd64 appengine !gc noasm
-
-package snappy
-
-// decode writes the decoding of src to dst. It assumes that the varint-encoded
-// length of the decompressed bytes has already been read, and that len(dst)
-// equals that length.
-//
-// It returns 0 on success or a decodeErrCodeXxx error code on failure.
-func decode(dst, src []byte) int {
- var d, s, offset, length int
- for s < len(src) {
- switch src[s] & 0x03 {
- case tagLiteral:
- x := uint32(src[s] >> 2)
- switch {
- case x < 60:
- s++
- case x == 60:
- s += 2
- if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
- return decodeErrCodeCorrupt
- }
- x = uint32(src[s-1])
- case x == 61:
- s += 3
- if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
- return decodeErrCodeCorrupt
- }
- x = uint32(src[s-2]) | uint32(src[s-1])<<8
- case x == 62:
- s += 4
- if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
- return decodeErrCodeCorrupt
- }
- x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
- case x == 63:
- s += 5
- if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
- return decodeErrCodeCorrupt
- }
- x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
- }
- length = int(x) + 1
- if length <= 0 {
- return decodeErrCodeUnsupportedLiteralLength
- }
- if length > len(dst)-d || length > len(src)-s {
- return decodeErrCodeCorrupt
- }
- copy(dst[d:], src[s:s+length])
- d += length
- s += length
- continue
-
- case tagCopy1:
- s += 2
- if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
- return decodeErrCodeCorrupt
- }
- length = 4 + int(src[s-2])>>2&0x7
- offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
-
- case tagCopy2:
- s += 3
- if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
- return decodeErrCodeCorrupt
- }
- length = 1 + int(src[s-3])>>2
- offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
-
- case tagCopy4:
- s += 5
- if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
- return decodeErrCodeCorrupt
- }
- length = 1 + int(src[s-5])>>2
- offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
- }
-
- if offset <= 0 || d < offset || length > len(dst)-d {
- return decodeErrCodeCorrupt
- }
- // Copy from an earlier sub-slice of dst to a later sub-slice. Unlike
- // the built-in copy function, this byte-by-byte copy always runs
- // forwards, even if the slices overlap. Conceptually, this is:
- //
- // d += forwardCopy(dst[d:d+length], dst[d-offset:])
- for end := d + length; d != end; d++ {
- dst[d] = dst[d-offset]
- }
- }
- if d != len(dst) {
- return decodeErrCodeCorrupt
- }
- return 0
-}
diff --git a/vendor/src/github.com/golang/snappy/encode.go b/vendor/src/github.com/golang/snappy/encode.go
deleted file mode 100644
index 8749689..0000000
--- a/vendor/src/github.com/golang/snappy/encode.go
+++ /dev/null
@@ -1,285 +0,0 @@
-// Copyright 2011 The Snappy-Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package snappy
-
-import (
- "encoding/binary"
- "errors"
- "io"
-)
-
-// Encode returns the encoded form of src. The returned slice may be a sub-
-// slice of dst if dst was large enough to hold the entire encoded block.
-// Otherwise, a newly allocated slice will be returned.
-//
-// The dst and src must not overlap. It is valid to pass a nil dst.
-func Encode(dst, src []byte) []byte {
- if n := MaxEncodedLen(len(src)); n < 0 {
- panic(ErrTooLarge)
- } else if len(dst) < n {
- dst = make([]byte, n)
- }
-
- // The block starts with the varint-encoded length of the decompressed bytes.
- d := binary.PutUvarint(dst, uint64(len(src)))
-
- for len(src) > 0 {
- p := src
- src = nil
- if len(p) > maxBlockSize {
- p, src = p[:maxBlockSize], p[maxBlockSize:]
- }
- if len(p) < minNonLiteralBlockSize {
- d += emitLiteral(dst[d:], p)
- } else {
- d += encodeBlock(dst[d:], p)
- }
- }
- return dst[:d]
-}
-
-// inputMargin is the minimum number of extra input bytes to keep, inside
-// encodeBlock's inner loop. On some architectures, this margin lets us
-// implement a fast path for emitLiteral, where the copy of short (<= 16 byte)
-// literals can be implemented as a single load to and store from a 16-byte
-// register. That literal's actual length can be as short as 1 byte, so this
-// can copy up to 15 bytes too much, but that's OK as subsequent iterations of
-// the encoding loop will fix up the copy overrun, and this inputMargin ensures
-// that we don't overrun the dst and src buffers.
-const inputMargin = 16 - 1
-
-// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that
-// could be encoded with a copy tag. This is the minimum with respect to the
-// algorithm used by encodeBlock, not a minimum enforced by the file format.
-//
-// The encoded output must start with at least a 1 byte literal, as there are
-// no previous bytes to copy. A minimal (1 byte) copy after that, generated
-// from an emitCopy call in encodeBlock's main loop, would require at least
-// another inputMargin bytes, for the reason above: we want any emitLiteral
-// calls inside encodeBlock's main loop to use the fast path if possible, which
-// requires being able to overrun by inputMargin bytes. Thus,
-// minNonLiteralBlockSize equals 1 + 1 + inputMargin.
-//
-// The C++ code doesn't use this exact threshold, but it could, as discussed at
-// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion
-// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an
-// optimization. It should not affect the encoded form. This is tested by
-// TestSameEncodingAsCppShortCopies.
-const minNonLiteralBlockSize = 1 + 1 + inputMargin
-
-// MaxEncodedLen returns the maximum length of a snappy block, given its
-// uncompressed length.
-//
-// It will return a negative value if srcLen is too large to encode.
-func MaxEncodedLen(srcLen int) int {
- n := uint64(srcLen)
- if n > 0xffffffff {
- return -1
- }
- // Compressed data can be defined as:
- // compressed := item* literal*
- // item := literal* copy
- //
- // The trailing literal sequence has a space blowup of at most 62/60
- // since a literal of length 60 needs one tag byte + one extra byte
- // for length information.
- //
- // Item blowup is trickier to measure. Suppose the "copy" op copies
- // 4 bytes of data. Because of a special check in the encoding code,
- // we produce a 4-byte copy only if the offset is < 65536. Therefore
- // the copy op takes 3 bytes to encode, and this type of item leads
- // to at most the 62/60 blowup for representing literals.
- //
- // Suppose the "copy" op copies 5 bytes of data. If the offset is big
- // enough, it will take 5 bytes to encode the copy op. Therefore the
- // worst case here is a one-byte literal followed by a five-byte copy.
- // That is, 6 bytes of input turn into 7 bytes of "compressed" data.
- //
- // This last factor dominates the blowup, so the final estimate is:
- n = 32 + n + n/6
- if n > 0xffffffff {
- return -1
- }
- return int(n)
-}
-
-var errClosed = errors.New("snappy: Writer is closed")
-
-// NewWriter returns a new Writer that compresses to w.
-//
-// The Writer returned does not buffer writes. There is no need to Flush or
-// Close such a Writer.
-//
-// Deprecated: the Writer returned is not suitable for many small writes, only
-// for few large writes. Use NewBufferedWriter instead, which is efficient
-// regardless of the frequency and shape of the writes, and remember to Close
-// that Writer when done.
-func NewWriter(w io.Writer) *Writer {
- return &Writer{
- w: w,
- obuf: make([]byte, obufLen),
- }
-}
-
-// NewBufferedWriter returns a new Writer that compresses to w, using the
-// framing format described at
-// https://github.com/google/snappy/blob/master/framing_format.txt
-//
-// The Writer returned buffers writes. Users must call Close to guarantee all
-// data has been forwarded to the underlying io.Writer. They may also call
-// Flush zero or more times before calling Close.
-func NewBufferedWriter(w io.Writer) *Writer {
- return &Writer{
- w: w,
- ibuf: make([]byte, 0, maxBlockSize),
- obuf: make([]byte, obufLen),
- }
-}
-
-// Writer is an io.Writer than can write Snappy-compressed bytes.
-type Writer struct {
- w io.Writer
- err error
-
- // ibuf is a buffer for the incoming (uncompressed) bytes.
- //
- // Its use is optional. For backwards compatibility, Writers created by the
- // NewWriter function have ibuf == nil, do not buffer incoming bytes, and
- // therefore do not need to be Flush'ed or Close'd.
- ibuf []byte
-
- // obuf is a buffer for the outgoing (compressed) bytes.
- obuf []byte
-
- // wroteStreamHeader is whether we have written the stream header.
- wroteStreamHeader bool
-}
-
-// Reset discards the writer's state and switches the Snappy writer to write to
-// w. This permits reusing a Writer rather than allocating a new one.
-func (w *Writer) Reset(writer io.Writer) {
- w.w = writer
- w.err = nil
- if w.ibuf != nil {
- w.ibuf = w.ibuf[:0]
- }
- w.wroteStreamHeader = false
-}
-
-// Write satisfies the io.Writer interface.
-func (w *Writer) Write(p []byte) (nRet int, errRet error) {
- if w.ibuf == nil {
- // Do not buffer incoming bytes. This does not perform or compress well
- // if the caller of Writer.Write writes many small slices. This
- // behavior is therefore deprecated, but still supported for backwards
- // compatibility with code that doesn't explicitly Flush or Close.
- return w.write(p)
- }
-
- // The remainder of this method is based on bufio.Writer.Write from the
- // standard library.
-
- for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil {
- var n int
- if len(w.ibuf) == 0 {
- // Large write, empty buffer.
- // Write directly from p to avoid copy.
- n, _ = w.write(p)
- } else {
- n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
- w.ibuf = w.ibuf[:len(w.ibuf)+n]
- w.Flush()
- }
- nRet += n
- p = p[n:]
- }
- if w.err != nil {
- return nRet, w.err
- }
- n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
- w.ibuf = w.ibuf[:len(w.ibuf)+n]
- nRet += n
- return nRet, nil
-}
-
-func (w *Writer) write(p []byte) (nRet int, errRet error) {
- if w.err != nil {
- return 0, w.err
- }
- for len(p) > 0 {
- obufStart := len(magicChunk)
- if !w.wroteStreamHeader {
- w.wroteStreamHeader = true
- copy(w.obuf, magicChunk)
- obufStart = 0
- }
-
- var uncompressed []byte
- if len(p) > maxBlockSize {
- uncompressed, p = p[:maxBlockSize], p[maxBlockSize:]
- } else {
- uncompressed, p = p, nil
- }
- checksum := crc(uncompressed)
-
- // Compress the buffer, discarding the result if the improvement
- // isn't at least 12.5%.
- compressed := Encode(w.obuf[obufHeaderLen:], uncompressed)
- chunkType := uint8(chunkTypeCompressedData)
- chunkLen := 4 + len(compressed)
- obufEnd := obufHeaderLen + len(compressed)
- if len(compressed) >= len(uncompressed)-len(uncompressed)/8 {
- chunkType = chunkTypeUncompressedData
- chunkLen = 4 + len(uncompressed)
- obufEnd = obufHeaderLen
- }
-
- // Fill in the per-chunk header that comes before the body.
- w.obuf[len(magicChunk)+0] = chunkType
- w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0)
- w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8)
- w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16)
- w.obuf[len(magicChunk)+4] = uint8(checksum >> 0)
- w.obuf[len(magicChunk)+5] = uint8(checksum >> 8)
- w.obuf[len(magicChunk)+6] = uint8(checksum >> 16)
- w.obuf[len(magicChunk)+7] = uint8(checksum >> 24)
-
- if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil {
- w.err = err
- return nRet, err
- }
- if chunkType == chunkTypeUncompressedData {
- if _, err := w.w.Write(uncompressed); err != nil {
- w.err = err
- return nRet, err
- }
- }
- nRet += len(uncompressed)
- }
- return nRet, nil
-}
-
-// Flush flushes the Writer to its underlying io.Writer.
-func (w *Writer) Flush() error {
- if w.err != nil {
- return w.err
- }
- if len(w.ibuf) == 0 {
- return nil
- }
- w.write(w.ibuf)
- w.ibuf = w.ibuf[:0]
- return w.err
-}
-
-// Close calls Flush and then closes the Writer.
-func (w *Writer) Close() error {
- w.Flush()
- ret := w.err
- if w.err == nil {
- w.err = errClosed
- }
- return ret
-}
diff --git a/vendor/src/github.com/golang/snappy/encode_amd64.go b/vendor/src/github.com/golang/snappy/encode_amd64.go
deleted file mode 100644
index 2a56fb5..0000000
--- a/vendor/src/github.com/golang/snappy/encode_amd64.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2016 The Snappy-Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !appengine
-// +build gc
-// +build !noasm
-
-package snappy
-
-// emitLiteral has the same semantics as in encode_other.go.
-//
-//go:noescape
-func emitLiteral(dst, lit []byte) int
-
-// emitCopy has the same semantics as in encode_other.go.
-//
-//go:noescape
-func emitCopy(dst []byte, offset, length int) int
-
-// extendMatch has the same semantics as in encode_other.go.
-//
-//go:noescape
-func extendMatch(src []byte, i, j int) int
-
-// encodeBlock has the same semantics as in encode_other.go.
-//
-//go:noescape
-func encodeBlock(dst, src []byte) (d int)
\ No newline at end of file
diff --git a/vendor/src/github.com/golang/snappy/encode_amd64.s b/vendor/src/github.com/golang/snappy/encode_amd64.s
deleted file mode 100644
index adfd979..0000000
--- a/vendor/src/github.com/golang/snappy/encode_amd64.s
+++ /dev/null
@@ -1,730 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !appengine
-// +build gc
-// +build !noasm
-
-#include "textflag.h"
-
-// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a
-// Go toolchain regression. See https://github.com/golang/go/issues/15426 and
-// https://github.com/golang/snappy/issues/29
-//
-// As a workaround, the package was built with a known good assembler, and
-// those instructions were disassembled by "objdump -d" to yield the
-// 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15
-// style comments, in AT&T asm syntax. Note that rsp here is a physical
-// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm).
-// The instructions were then encoded as "BYTE $0x.." sequences, which assemble
-// fine on Go 1.6.
-
-// The asm code generally follows the pure Go code in encode_other.go, except
-// where marked with a "!!!".
-
-// ----------------------------------------------------------------------------
-
-// func emitLiteral(dst, lit []byte) int
-//
-// All local variables fit into registers. The register allocation:
-// - AX len(lit)
-// - BX n
-// - DX return value
-// - DI &dst[i]
-// - R10 &lit[0]
-//
-// The 24 bytes of stack space is to call runtime·memmove.
-//
-// The unusual register allocation of local variables, such as R10 for the
-// source pointer, matches the allocation used at the call site in encodeBlock,
-// which makes it easier to manually inline this function.
-TEXT ·emitLiteral(SB), NOSPLIT, $24-56
- MOVQ dst_base+0(FP), DI
- MOVQ lit_base+24(FP), R10
- MOVQ lit_len+32(FP), AX
- MOVQ AX, DX
- MOVL AX, BX
- SUBL $1, BX
-
- CMPL BX, $60
- JLT oneByte
- CMPL BX, $256
- JLT twoBytes
-
-threeBytes:
- MOVB $0xf4, 0(DI)
- MOVW BX, 1(DI)
- ADDQ $3, DI
- ADDQ $3, DX
- JMP memmove
-
-twoBytes:
- MOVB $0xf0, 0(DI)
- MOVB BX, 1(DI)
- ADDQ $2, DI
- ADDQ $2, DX
- JMP memmove
-
-oneByte:
- SHLB $2, BX
- MOVB BX, 0(DI)
- ADDQ $1, DI
- ADDQ $1, DX
-
-memmove:
- MOVQ DX, ret+48(FP)
-
- // copy(dst[i:], lit)
- //
- // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
- // DI, R10 and AX as arguments.
- MOVQ DI, 0(SP)
- MOVQ R10, 8(SP)
- MOVQ AX, 16(SP)
- CALL runtime·memmove(SB)
- RET
-
-// ----------------------------------------------------------------------------
-
-// func emitCopy(dst []byte, offset, length int) int
-//
-// All local variables fit into registers. The register allocation:
-// - AX length
-// - SI &dst[0]
-// - DI &dst[i]
-// - R11 offset
-//
-// The unusual register allocation of local variables, such as R11 for the
-// offset, matches the allocation used at the call site in encodeBlock, which
-// makes it easier to manually inline this function.
-TEXT ·emitCopy(SB), NOSPLIT, $0-48
- MOVQ dst_base+0(FP), DI
- MOVQ DI, SI
- MOVQ offset+24(FP), R11
- MOVQ length+32(FP), AX
-
-loop0:
- // for length >= 68 { etc }
- CMPL AX, $68
- JLT step1
-
- // Emit a length 64 copy, encoded as 3 bytes.
- MOVB $0xfe, 0(DI)
- MOVW R11, 1(DI)
- ADDQ $3, DI
- SUBL $64, AX
- JMP loop0
-
-step1:
- // if length > 64 { etc }
- CMPL AX, $64
- JLE step2
-
- // Emit a length 60 copy, encoded as 3 bytes.
- MOVB $0xee, 0(DI)
- MOVW R11, 1(DI)
- ADDQ $3, DI
- SUBL $60, AX
-
-step2:
- // if length >= 12 || offset >= 2048 { goto step3 }
- CMPL AX, $12
- JGE step3
- CMPL R11, $2048
- JGE step3
-
- // Emit the remaining copy, encoded as 2 bytes.
- MOVB R11, 1(DI)
- SHRL $8, R11
- SHLB $5, R11
- SUBB $4, AX
- SHLB $2, AX
- ORB AX, R11
- ORB $1, R11
- MOVB R11, 0(DI)
- ADDQ $2, DI
-
- // Return the number of bytes written.
- SUBQ SI, DI
- MOVQ DI, ret+40(FP)
- RET
-
-step3:
- // Emit the remaining copy, encoded as 3 bytes.
- SUBL $1, AX
- SHLB $2, AX
- ORB $2, AX
- MOVB AX, 0(DI)
- MOVW R11, 1(DI)
- ADDQ $3, DI
-
- // Return the number of bytes written.
- SUBQ SI, DI
- MOVQ DI, ret+40(FP)
- RET
-
-// ----------------------------------------------------------------------------
-
-// func extendMatch(src []byte, i, j int) int
-//
-// All local variables fit into registers. The register allocation:
-// - DX &src[0]
-// - SI &src[j]
-// - R13 &src[len(src) - 8]
-// - R14 &src[len(src)]
-// - R15 &src[i]
-//
-// The unusual register allocation of local variables, such as R15 for a source
-// pointer, matches the allocation used at the call site in encodeBlock, which
-// makes it easier to manually inline this function.
-TEXT ·extendMatch(SB), NOSPLIT, $0-48
- MOVQ src_base+0(FP), DX
- MOVQ src_len+8(FP), R14
- MOVQ i+24(FP), R15
- MOVQ j+32(FP), SI
- ADDQ DX, R14
- ADDQ DX, R15
- ADDQ DX, SI
- MOVQ R14, R13
- SUBQ $8, R13
-
-cmp8:
- // As long as we are 8 or more bytes before the end of src, we can load and
- // compare 8 bytes at a time. If those 8 bytes are equal, repeat.
- CMPQ SI, R13
- JA cmp1
- MOVQ (R15), AX
- MOVQ (SI), BX
- CMPQ AX, BX
- JNE bsf
- ADDQ $8, R15
- ADDQ $8, SI
- JMP cmp8
-
-bsf:
- // If those 8 bytes were not equal, XOR the two 8 byte values, and return
- // the index of the first byte that differs. The BSF instruction finds the
- // least significant 1 bit, the amd64 architecture is little-endian, and
- // the shift by 3 converts a bit index to a byte index.
- XORQ AX, BX
- BSFQ BX, BX
- SHRQ $3, BX
- ADDQ BX, SI
-
- // Convert from &src[ret] to ret.
- SUBQ DX, SI
- MOVQ SI, ret+40(FP)
- RET
-
-cmp1:
- // In src's tail, compare 1 byte at a time.
- CMPQ SI, R14
- JAE extendMatchEnd
- MOVB (R15), AX
- MOVB (SI), BX
- CMPB AX, BX
- JNE extendMatchEnd
- ADDQ $1, R15
- ADDQ $1, SI
- JMP cmp1
-
-extendMatchEnd:
- // Convert from &src[ret] to ret.
- SUBQ DX, SI
- MOVQ SI, ret+40(FP)
- RET
-
-// ----------------------------------------------------------------------------
-
-// func encodeBlock(dst, src []byte) (d int)
-//
-// All local variables fit into registers, other than "var table". The register
-// allocation:
-// - AX . .
-// - BX . .
-// - CX 56 shift (note that amd64 shifts by non-immediates must use CX).
-// - DX 64 &src[0], tableSize
-// - SI 72 &src[s]
-// - DI 80 &dst[d]
-// - R9 88 sLimit
-// - R10 . &src[nextEmit]
-// - R11 96 prevHash, currHash, nextHash, offset
-// - R12 104 &src[base], skip
-// - R13 . &src[nextS], &src[len(src) - 8]
-// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x
-// - R15 112 candidate
-//
-// The second column (56, 64, etc) is the stack offset to spill the registers
-// when calling other functions. We could pack this slightly tighter, but it's
-// simpler to have a dedicated spill map independent of the function called.
-//
-// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An
-// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill
-// local variables (registers) during calls gives 32768 + 56 + 64 = 32888.
-TEXT ·encodeBlock(SB), 0, $32888-56
- MOVQ dst_base+0(FP), DI
- MOVQ src_base+24(FP), SI
- MOVQ src_len+32(FP), R14
-
- // shift, tableSize := uint32(32-8), 1<<8
- MOVQ $24, CX
- MOVQ $256, DX
-
-calcShift:
- // for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
- // shift--
- // }
- CMPQ DX, $16384
- JGE varTable
- CMPQ DX, R14
- JGE varTable
- SUBQ $1, CX
- SHLQ $1, DX
- JMP calcShift
-
-varTable:
- // var table [maxTableSize]uint16
- //
- // In the asm code, unlike the Go code, we can zero-initialize only the
- // first tableSize elements. Each uint16 element is 2 bytes and each MOVOU
- // writes 16 bytes, so we can do only tableSize/8 writes instead of the
- // 2048 writes that would zero-initialize all of table's 32768 bytes.
- SHRQ $3, DX
- LEAQ table-32768(SP), BX
- PXOR X0, X0
-
-memclr:
- MOVOU X0, 0(BX)
- ADDQ $16, BX
- SUBQ $1, DX
- JNZ memclr
-
- // !!! DX = &src[0]
- MOVQ SI, DX
-
- // sLimit := len(src) - inputMargin
- MOVQ R14, R9
- SUBQ $15, R9
-
- // !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't
- // change for the rest of the function.
- MOVQ CX, 56(SP)
- MOVQ DX, 64(SP)
- MOVQ R9, 88(SP)
-
- // nextEmit := 0
- MOVQ DX, R10
-
- // s := 1
- ADDQ $1, SI
-
- // nextHash := hash(load32(src, s), shift)
- MOVL 0(SI), R11
- IMULL $0x1e35a7bd, R11
- SHRL CX, R11
-
-outer:
- // for { etc }
-
- // skip := 32
- MOVQ $32, R12
-
- // nextS := s
- MOVQ SI, R13
-
- // candidate := 0
- MOVQ $0, R15
-
-inner0:
- // for { etc }
-
- // s := nextS
- MOVQ R13, SI
-
- // bytesBetweenHashLookups := skip >> 5
- MOVQ R12, R14
- SHRQ $5, R14
-
- // nextS = s + bytesBetweenHashLookups
- ADDQ R14, R13
-
- // skip += bytesBetweenHashLookups
- ADDQ R14, R12
-
- // if nextS > sLimit { goto emitRemainder }
- MOVQ R13, AX
- SUBQ DX, AX
- CMPQ AX, R9
- JA emitRemainder
-
- // candidate = int(table[nextHash])
- // XXX: MOVWQZX table-32768(SP)(R11*2), R15
- // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15
- BYTE $0x4e
- BYTE $0x0f
- BYTE $0xb7
- BYTE $0x7c
- BYTE $0x5c
- BYTE $0x78
-
- // table[nextHash] = uint16(s)
- MOVQ SI, AX
- SUBQ DX, AX
-
- // XXX: MOVW AX, table-32768(SP)(R11*2)
- // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
- BYTE $0x66
- BYTE $0x42
- BYTE $0x89
- BYTE $0x44
- BYTE $0x5c
- BYTE $0x78
-
- // nextHash = hash(load32(src, nextS), shift)
- MOVL 0(R13), R11
- IMULL $0x1e35a7bd, R11
- SHRL CX, R11
-
- // if load32(src, s) != load32(src, candidate) { continue } break
- MOVL 0(SI), AX
- MOVL (DX)(R15*1), BX
- CMPL AX, BX
- JNE inner0
-
-fourByteMatch:
- // As per the encode_other.go code:
- //
- // A 4-byte match has been found. We'll later see etc.
-
- // !!! Jump to a fast path for short (<= 16 byte) literals. See the comment
- // on inputMargin in encode.go.
- MOVQ SI, AX
- SUBQ R10, AX
- CMPQ AX, $16
- JLE emitLiteralFastPath
-
- // ----------------------------------------
- // Begin inline of the emitLiteral call.
- //
- // d += emitLiteral(dst[d:], src[nextEmit:s])
-
- MOVL AX, BX
- SUBL $1, BX
-
- CMPL BX, $60
- JLT inlineEmitLiteralOneByte
- CMPL BX, $256
- JLT inlineEmitLiteralTwoBytes
-
-inlineEmitLiteralThreeBytes:
- MOVB $0xf4, 0(DI)
- MOVW BX, 1(DI)
- ADDQ $3, DI
- JMP inlineEmitLiteralMemmove
-
-inlineEmitLiteralTwoBytes:
- MOVB $0xf0, 0(DI)
- MOVB BX, 1(DI)
- ADDQ $2, DI
- JMP inlineEmitLiteralMemmove
-
-inlineEmitLiteralOneByte:
- SHLB $2, BX
- MOVB BX, 0(DI)
- ADDQ $1, DI
-
-inlineEmitLiteralMemmove:
- // Spill local variables (registers) onto the stack; call; unspill.
- //
- // copy(dst[i:], lit)
- //
- // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
- // DI, R10 and AX as arguments.
- MOVQ DI, 0(SP)
- MOVQ R10, 8(SP)
- MOVQ AX, 16(SP)
- ADDQ AX, DI // Finish the "d +=" part of "d += emitLiteral(etc)".
- MOVQ SI, 72(SP)
- MOVQ DI, 80(SP)
- MOVQ R15, 112(SP)
- CALL runtime·memmove(SB)
- MOVQ 56(SP), CX
- MOVQ 64(SP), DX
- MOVQ 72(SP), SI
- MOVQ 80(SP), DI
- MOVQ 88(SP), R9
- MOVQ 112(SP), R15
- JMP inner1
-
-inlineEmitLiteralEnd:
- // End inline of the emitLiteral call.
- // ----------------------------------------
-
-emitLiteralFastPath:
- // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2".
- MOVB AX, BX
- SUBB $1, BX
- SHLB $2, BX
- MOVB BX, (DI)
- ADDQ $1, DI
-
- // !!! Implement the copy from lit to dst as a 16-byte load and store.
- // (Encode's documentation says that dst and src must not overlap.)
- //
- // This always copies 16 bytes, instead of only len(lit) bytes, but that's
- // OK. Subsequent iterations will fix up the overrun.
- //
- // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
- // 16-byte loads and stores. This technique probably wouldn't be as
- // effective on architectures that are fussier about alignment.
- MOVOU 0(R10), X0
- MOVOU X0, 0(DI)
- ADDQ AX, DI
-
-inner1:
- // for { etc }
-
- // base := s
- MOVQ SI, R12
-
- // !!! offset := base - candidate
- MOVQ R12, R11
- SUBQ R15, R11
- SUBQ DX, R11
-
- // ----------------------------------------
- // Begin inline of the extendMatch call.
- //
- // s = extendMatch(src, candidate+4, s+4)
-
- // !!! R14 = &src[len(src)]
- MOVQ src_len+32(FP), R14
- ADDQ DX, R14
-
- // !!! R13 = &src[len(src) - 8]
- MOVQ R14, R13
- SUBQ $8, R13
-
- // !!! R15 = &src[candidate + 4]
- ADDQ $4, R15
- ADDQ DX, R15
-
- // !!! s += 4
- ADDQ $4, SI
-
-inlineExtendMatchCmp8:
- // As long as we are 8 or more bytes before the end of src, we can load and
- // compare 8 bytes at a time. If those 8 bytes are equal, repeat.
- CMPQ SI, R13
- JA inlineExtendMatchCmp1
- MOVQ (R15), AX
- MOVQ (SI), BX
- CMPQ AX, BX
- JNE inlineExtendMatchBSF
- ADDQ $8, R15
- ADDQ $8, SI
- JMP inlineExtendMatchCmp8
-
-inlineExtendMatchBSF:
- // If those 8 bytes were not equal, XOR the two 8 byte values, and return
- // the index of the first byte that differs. The BSF instruction finds the
- // least significant 1 bit, the amd64 architecture is little-endian, and
- // the shift by 3 converts a bit index to a byte index.
- XORQ AX, BX
- BSFQ BX, BX
- SHRQ $3, BX
- ADDQ BX, SI
- JMP inlineExtendMatchEnd
-
-inlineExtendMatchCmp1:
- // In src's tail, compare 1 byte at a time.
- CMPQ SI, R14
- JAE inlineExtendMatchEnd
- MOVB (R15), AX
- MOVB (SI), BX
- CMPB AX, BX
- JNE inlineExtendMatchEnd
- ADDQ $1, R15
- ADDQ $1, SI
- JMP inlineExtendMatchCmp1
-
-inlineExtendMatchEnd:
- // End inline of the extendMatch call.
- // ----------------------------------------
-
- // ----------------------------------------
- // Begin inline of the emitCopy call.
- //
- // d += emitCopy(dst[d:], base-candidate, s-base)
-
- // !!! length := s - base
- MOVQ SI, AX
- SUBQ R12, AX
-
-inlineEmitCopyLoop0:
- // for length >= 68 { etc }
- CMPL AX, $68
- JLT inlineEmitCopyStep1
-
- // Emit a length 64 copy, encoded as 3 bytes.
- MOVB $0xfe, 0(DI)
- MOVW R11, 1(DI)
- ADDQ $3, DI
- SUBL $64, AX
- JMP inlineEmitCopyLoop0
-
-inlineEmitCopyStep1:
- // if length > 64 { etc }
- CMPL AX, $64
- JLE inlineEmitCopyStep2
-
- // Emit a length 60 copy, encoded as 3 bytes.
- MOVB $0xee, 0(DI)
- MOVW R11, 1(DI)
- ADDQ $3, DI
- SUBL $60, AX
-
-inlineEmitCopyStep2:
- // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 }
- CMPL AX, $12
- JGE inlineEmitCopyStep3
- CMPL R11, $2048
- JGE inlineEmitCopyStep3
-
- // Emit the remaining copy, encoded as 2 bytes.
- MOVB R11, 1(DI)
- SHRL $8, R11
- SHLB $5, R11
- SUBB $4, AX
- SHLB $2, AX
- ORB AX, R11
- ORB $1, R11
- MOVB R11, 0(DI)
- ADDQ $2, DI
- JMP inlineEmitCopyEnd
-
-inlineEmitCopyStep3:
- // Emit the remaining copy, encoded as 3 bytes.
- SUBL $1, AX
- SHLB $2, AX
- ORB $2, AX
- MOVB AX, 0(DI)
- MOVW R11, 1(DI)
- ADDQ $3, DI
-
-inlineEmitCopyEnd:
- // End inline of the emitCopy call.
- // ----------------------------------------
-
- // nextEmit = s
- MOVQ SI, R10
-
- // if s >= sLimit { goto emitRemainder }
- MOVQ SI, AX
- SUBQ DX, AX
- CMPQ AX, R9
- JAE emitRemainder
-
- // As per the encode_other.go code:
- //
- // We could immediately etc.
-
- // x := load64(src, s-1)
- MOVQ -1(SI), R14
-
- // prevHash := hash(uint32(x>>0), shift)
- MOVL R14, R11
- IMULL $0x1e35a7bd, R11
- SHRL CX, R11
-
- // table[prevHash] = uint16(s-1)
- MOVQ SI, AX
- SUBQ DX, AX
- SUBQ $1, AX
-
- // XXX: MOVW AX, table-32768(SP)(R11*2)
- // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
- BYTE $0x66
- BYTE $0x42
- BYTE $0x89
- BYTE $0x44
- BYTE $0x5c
- BYTE $0x78
-
- // currHash := hash(uint32(x>>8), shift)
- SHRQ $8, R14
- MOVL R14, R11
- IMULL $0x1e35a7bd, R11
- SHRL CX, R11
-
- // candidate = int(table[currHash])
- // XXX: MOVWQZX table-32768(SP)(R11*2), R15
- // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15
- BYTE $0x4e
- BYTE $0x0f
- BYTE $0xb7
- BYTE $0x7c
- BYTE $0x5c
- BYTE $0x78
-
- // table[currHash] = uint16(s)
- ADDQ $1, AX
-
- // XXX: MOVW AX, table-32768(SP)(R11*2)
- // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
- BYTE $0x66
- BYTE $0x42
- BYTE $0x89
- BYTE $0x44
- BYTE $0x5c
- BYTE $0x78
-
- // if uint32(x>>8) == load32(src, candidate) { continue }
- MOVL (DX)(R15*1), BX
- CMPL R14, BX
- JEQ inner1
-
- // nextHash = hash(uint32(x>>16), shift)
- SHRQ $8, R14
- MOVL R14, R11
- IMULL $0x1e35a7bd, R11
- SHRL CX, R11
-
- // s++
- ADDQ $1, SI
-
- // break out of the inner1 for loop, i.e. continue the outer loop.
- JMP outer
-
-emitRemainder:
- // if nextEmit < len(src) { etc }
- MOVQ src_len+32(FP), AX
- ADDQ DX, AX
- CMPQ R10, AX
- JEQ encodeBlockEnd
-
- // d += emitLiteral(dst[d:], src[nextEmit:])
- //
- // Push args.
- MOVQ DI, 0(SP)
- MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative.
- MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative.
- MOVQ R10, 24(SP)
- SUBQ R10, AX
- MOVQ AX, 32(SP)
- MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative.
-
- // Spill local variables (registers) onto the stack; call; unspill.
- MOVQ DI, 80(SP)
- CALL ·emitLiteral(SB)
- MOVQ 80(SP), DI
-
- // Finish the "d +=" part of "d += emitLiteral(etc)".
- ADDQ 48(SP), DI
-
-encodeBlockEnd:
- MOVQ dst_base+0(FP), AX
- SUBQ AX, DI
- MOVQ DI, d+48(FP)
- RET
diff --git a/vendor/src/github.com/golang/snappy/encode_other.go b/vendor/src/github.com/golang/snappy/encode_other.go
deleted file mode 100644
index dbcae90..0000000
--- a/vendor/src/github.com/golang/snappy/encode_other.go
+++ /dev/null
@@ -1,238 +0,0 @@
-// Copyright 2016 The Snappy-Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !amd64 appengine !gc noasm
-
-package snappy
-
-func load32(b []byte, i int) uint32 {
- b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line.
- return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
-}
-
-func load64(b []byte, i int) uint64 {
- b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line.
- return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
- uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
-}
-
-// emitLiteral writes a literal chunk and returns the number of bytes written.
-//
-// It assumes that:
-// dst is long enough to hold the encoded bytes
-// 1 <= len(lit) && len(lit) <= 65536
-func emitLiteral(dst, lit []byte) int {
- i, n := 0, uint(len(lit)-1)
- switch {
- case n < 60:
- dst[0] = uint8(n)<<2 | tagLiteral
- i = 1
- case n < 1<<8:
- dst[0] = 60<<2 | tagLiteral
- dst[1] = uint8(n)
- i = 2
- default:
- dst[0] = 61<<2 | tagLiteral
- dst[1] = uint8(n)
- dst[2] = uint8(n >> 8)
- i = 3
- }
- return i + copy(dst[i:], lit)
-}
-
-// emitCopy writes a copy chunk and returns the number of bytes written.
-//
-// It assumes that:
-// dst is long enough to hold the encoded bytes
-// 1 <= offset && offset <= 65535
-// 4 <= length && length <= 65535
-func emitCopy(dst []byte, offset, length int) int {
- i := 0
- // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The
- // threshold for this loop is a little higher (at 68 = 64 + 4), and the
- // length emitted down below is is a little lower (at 60 = 64 - 4), because
- // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed
- // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as
- // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as
- // 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a
- // tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an
- // encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1.
- for length >= 68 {
- // Emit a length 64 copy, encoded as 3 bytes.
- dst[i+0] = 63<<2 | tagCopy2
- dst[i+1] = uint8(offset)
- dst[i+2] = uint8(offset >> 8)
- i += 3
- length -= 64
- }
- if length > 64 {
- // Emit a length 60 copy, encoded as 3 bytes.
- dst[i+0] = 59<<2 | tagCopy2
- dst[i+1] = uint8(offset)
- dst[i+2] = uint8(offset >> 8)
- i += 3
- length -= 60
- }
- if length >= 12 || offset >= 2048 {
- // Emit the remaining copy, encoded as 3 bytes.
- dst[i+0] = uint8(length-1)<<2 | tagCopy2
- dst[i+1] = uint8(offset)
- dst[i+2] = uint8(offset >> 8)
- return i + 3
- }
- // Emit the remaining copy, encoded as 2 bytes.
- dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
- dst[i+1] = uint8(offset)
- return i + 2
-}
-
-// extendMatch returns the largest k such that k <= len(src) and that
-// src[i:i+k-j] and src[j:k] have the same contents.
-//
-// It assumes that:
-// 0 <= i && i < j && j <= len(src)
-func extendMatch(src []byte, i, j int) int {
- for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
- }
- return j
-}
-
-func hash(u, shift uint32) uint32 {
- return (u * 0x1e35a7bd) >> shift
-}
-
-// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
-// assumes that the varint-encoded length of the decompressed bytes has already
-// been written.
-//
-// It also assumes that:
-// len(dst) >= MaxEncodedLen(len(src)) &&
-// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
-func encodeBlock(dst, src []byte) (d int) {
- // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
- // The table element type is uint16, as s < sLimit and sLimit < len(src)
- // and len(src) <= maxBlockSize and maxBlockSize == 65536.
- const (
- maxTableSize = 1 << 14
- // tableMask is redundant, but helps the compiler eliminate bounds
- // checks.
- tableMask = maxTableSize - 1
- )
- shift := uint32(32 - 8)
- for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
- shift--
- }
- // In Go, all array elements are zero-initialized, so there is no advantage
- // to a smaller tableSize per se. However, it matches the C++ algorithm,
- // and in the asm versions of this code, we can get away with zeroing only
- // the first tableSize elements.
- var table [maxTableSize]uint16
-
- // sLimit is when to stop looking for offset/length copies. The inputMargin
- // lets us use a fast path for emitLiteral in the main loop, while we are
- // looking for copies.
- sLimit := len(src) - inputMargin
-
- // nextEmit is where in src the next emitLiteral should start from.
- nextEmit := 0
-
- // The encoded form must start with a literal, as there are no previous
- // bytes to copy, so we start looking for hash matches at s == 1.
- s := 1
- nextHash := hash(load32(src, s), shift)
-
- for {
- // Copied from the C++ snappy implementation:
- //
- // Heuristic match skipping: If 32 bytes are scanned with no matches
- // found, start looking only at every other byte. If 32 more bytes are
- // scanned (or skipped), look at every third byte, etc.. When a match
- // is found, immediately go back to looking at every byte. This is a
- // small loss (~5% performance, ~0.1% density) for compressible data
- // due to more bookkeeping, but for non-compressible data (such as
- // JPEG) it's a huge win since the compressor quickly "realizes" the
- // data is incompressible and doesn't bother looking for matches
- // everywhere.
- //
- // The "skip" variable keeps track of how many bytes there are since
- // the last match; dividing it by 32 (ie. right-shifting by five) gives
- // the number of bytes to move ahead for each iteration.
- skip := 32
-
- nextS := s
- candidate := 0
- for {
- s = nextS
- bytesBetweenHashLookups := skip >> 5
- nextS = s + bytesBetweenHashLookups
- skip += bytesBetweenHashLookups
- if nextS > sLimit {
- goto emitRemainder
- }
- candidate = int(table[nextHash&tableMask])
- table[nextHash&tableMask] = uint16(s)
- nextHash = hash(load32(src, nextS), shift)
- if load32(src, s) == load32(src, candidate) {
- break
- }
- }
-
- // A 4-byte match has been found. We'll later see if more than 4 bytes
- // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
- // them as literal bytes.
- d += emitLiteral(dst[d:], src[nextEmit:s])
-
- // Call emitCopy, and then see if another emitCopy could be our next
- // move. Repeat until we find no match for the input immediately after
- // what was consumed by the last emitCopy call.
- //
- // If we exit this loop normally then we need to call emitLiteral next,
- // though we don't yet know how big the literal will be. We handle that
- // by proceeding to the next iteration of the main loop. We also can
- // exit this loop via goto if we get close to exhausting the input.
- for {
- // Invariant: we have a 4-byte match at s, and no need to emit any
- // literal bytes prior to s.
- base := s
-
- // Extend the 4-byte match as long as possible.
- //
- // This is an inlined version of:
- // s = extendMatch(src, candidate+4, s+4)
- s += 4
- for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 {
- }
-
- d += emitCopy(dst[d:], base-candidate, s-base)
- nextEmit = s
- if s >= sLimit {
- goto emitRemainder
- }
-
- // We could immediately start working at s now, but to improve
- // compression we first update the hash table at s-1 and at s. If
- // another emitCopy is not our next move, also calculate nextHash
- // at s+1. At least on GOARCH=amd64, these three hash calculations
- // are faster as one load64 call (with some shifts) instead of
- // three load32 calls.
- x := load64(src, s-1)
- prevHash := hash(uint32(x>>0), shift)
- table[prevHash&tableMask] = uint16(s - 1)
- currHash := hash(uint32(x>>8), shift)
- candidate = int(table[currHash&tableMask])
- table[currHash&tableMask] = uint16(s)
- if uint32(x>>8) != load32(src, candidate) {
- nextHash = hash(uint32(x>>16), shift)
- s++
- break
- }
- }
- }
-
-emitRemainder:
- if nextEmit < len(src) {
- d += emitLiteral(dst[d:], src[nextEmit:])
- }
- return d
-}
diff --git a/vendor/src/github.com/golang/snappy/golden_test.go b/vendor/src/github.com/golang/snappy/golden_test.go
deleted file mode 100644
index e4496f9..0000000
--- a/vendor/src/github.com/golang/snappy/golden_test.go
+++ /dev/null
@@ -1,1965 +0,0 @@
-// Copyright 2016 The Snappy-Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package snappy
-
-// extendMatchGoldenTestCases is the i and j arguments, and the returned value,
-// for every extendMatch call issued when encoding the
-// testdata/Mark.Twain-Tom.Sawyer.txt file. It is used to benchmark the
-// extendMatch implementation.
-//
-// It was generated manually by adding some print statements to the (pure Go)
-// extendMatch implementation:
-//
-// func extendMatch(src []byte, i, j int) int {
-// i0, j0 := i, j
-// for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
-// }
-// println("{", i0, ",", j0, ",", j, "},")
-// return j
-// }
-//
-// and running "go test -test.run=EncodeGoldenInput -tags=noasm".
-var extendMatchGoldenTestCases = []struct {
- i, j, want int
-}{
- {11, 61, 62},
- {80, 81, 82},
- {86, 87, 101},
- {85, 133, 149},
- {152, 153, 162},
- {133, 168, 193},
- {168, 207, 225},
- {81, 255, 275},
- {278, 279, 283},
- {306, 417, 417},
- {373, 428, 430},
- {389, 444, 447},
- {474, 510, 512},
- {465, 533, 533},
- {47, 547, 547},
- {307, 551, 554},
- {420, 582, 587},
- {309, 604, 604},
- {604, 625, 625},
- {538, 629, 629},
- {328, 640, 640},
- {573, 645, 645},
- {319, 657, 657},
- {30, 664, 664},
- {45, 679, 680},
- {621, 684, 684},
- {376, 700, 700},
- {33, 707, 708},
- {601, 733, 733},
- {334, 744, 745},
- {625, 758, 759},
- {382, 763, 763},
- {550, 769, 771},
- {533, 789, 789},
- {804, 813, 813},
- {342, 841, 842},
- {742, 847, 847},
- {74, 852, 852},
- {810, 864, 864},
- {758, 868, 869},
- {714, 883, 883},
- {582, 889, 891},
- {61, 934, 935},
- {894, 942, 942},
- {939, 949, 949},
- {785, 956, 957},
- {886, 978, 978},
- {792, 998, 998},
- {998, 1005, 1005},
- {572, 1032, 1032},
- {698, 1051, 1053},
- {599, 1067, 1069},
- {1056, 1079, 1079},
- {942, 1089, 1090},
- {831, 1094, 1096},
- {1088, 1100, 1103},
- {732, 1113, 1114},
- {1037, 1118, 1118},
- {872, 1128, 1130},
- {1079, 1140, 1142},
- {332, 1162, 1162},
- {207, 1168, 1186},
- {1189, 1190, 1225},
- {105, 1229, 1230},
- {79, 1256, 1257},
- {1190, 1261, 1283},
- {255, 1306, 1306},
- {1319, 1339, 1358},
- {364, 1370, 1370},
- {955, 1378, 1380},
- {122, 1403, 1403},
- {1325, 1407, 1419},
- {664, 1423, 1424},
- {941, 1461, 1463},
- {867, 1477, 1478},
- {757, 1488, 1489},
- {1140, 1499, 1499},
- {31, 1506, 1506},
- {1487, 1510, 1512},
- {1089, 1520, 1521},
- {1467, 1525, 1529},
- {1394, 1537, 1537},
- {1499, 1541, 1541},
- {367, 1558, 1558},
- {1475, 1564, 1564},
- {1525, 1568, 1571},
- {1541, 1582, 1583},
- {864, 1587, 1588},
- {704, 1597, 1597},
- {336, 1602, 1602},
- {1383, 1613, 1613},
- {1498, 1617, 1618},
- {1051, 1623, 1625},
- {401, 1643, 1645},
- {1072, 1654, 1655},
- {1067, 1667, 1669},
- {699, 1673, 1674},
- {1587, 1683, 1684},
- {920, 1696, 1696},
- {1505, 1710, 1710},
- {1550, 1723, 1723},
- {996, 1727, 1727},
- {833, 1733, 1734},
- {1638, 1739, 1740},
- {1654, 1744, 1744},
- {753, 1761, 1761},
- {1548, 1773, 1773},
- {1568, 1777, 1780},
- {1683, 1793, 1794},
- {948, 1801, 1801},
- {1666, 1805, 1808},
- {1502, 1814, 1814},
- {1696, 1822, 1822},
- {502, 1836, 1837},
- {917, 1843, 1843},
- {1733, 1854, 1855},
- {970, 1859, 1859},
- {310, 1863, 1863},
- {657, 1872, 1872},
- {1005, 1876, 1876},
- {1662, 1880, 1880},
- {904, 1892, 1892},
- {1427, 1910, 1910},
- {1772, 1929, 1930},
- {1822, 1937, 1940},
- {1858, 1949, 1950},
- {1602, 1956, 1956},
- {1150, 1962, 1962},
- {1504, 1966, 1967},
- {51, 1971, 1971},
- {1605, 1979, 1979},
- {1458, 1983, 1988},
- {1536, 2001, 2006},
- {1373, 2014, 2018},
- {1494, 2025, 2025},
- {1667, 2029, 2031},
- {1592, 2035, 2035},
- {330, 2045, 2045},
- {1376, 2053, 2053},
- {1991, 2058, 2059},
- {1635, 2065, 2065},
- {1992, 2073, 2074},
- {2014, 2080, 2081},
- {1546, 2085, 2087},
- {59, 2099, 2099},
- {1996, 2106, 2106},
- {1836, 2110, 2110},
- {2068, 2114, 2114},
- {1338, 2122, 2122},
- {1562, 2128, 2130},
- {1934, 2134, 2134},
- {2114, 2141, 2142},
- {977, 2149, 2150},
- {956, 2154, 2155},
- {1407, 2162, 2162},
- {1773, 2166, 2166},
- {883, 2171, 2171},
- {623, 2175, 2178},
- {1520, 2191, 2192},
- {1162, 2200, 2200},
- {912, 2204, 2204},
- {733, 2208, 2208},
- {1777, 2212, 2215},
- {1532, 2219, 2219},
- {718, 2223, 2225},
- {2069, 2229, 2229},
- {2207, 2245, 2246},
- {1139, 2264, 2264},
- {677, 2274, 2274},
- {2099, 2279, 2279},
- {1863, 2283, 2283},
- {1966, 2305, 2306},
- {2279, 2313, 2313},
- {1628, 2319, 2319},
- {755, 2329, 2329},
- {1461, 2334, 2334},
- {2117, 2340, 2340},
- {2313, 2349, 2349},
- {1859, 2353, 2353},
- {1048, 2362, 2362},
- {895, 2366, 2366},
- {2278, 2373, 2373},
- {1884, 2377, 2377},
- {1402, 2387, 2392},
- {700, 2398, 2398},
- {1971, 2402, 2402},
- {2009, 2419, 2419},
- {1441, 2426, 2428},
- {2208, 2432, 2432},
- {2038, 2436, 2436},
- {932, 2443, 2443},
- {1759, 2447, 2448},
- {744, 2452, 2452},
- {1875, 2458, 2458},
- {2405, 2468, 2468},
- {1596, 2472, 2473},
- {1953, 2480, 2482},
- {736, 2487, 2487},
- {1913, 2493, 2493},
- {774, 2497, 2497},
- {1484, 2506, 2508},
- {2432, 2512, 2512},
- {752, 2519, 2519},
- {2497, 2523, 2523},
- {2409, 2528, 2529},
- {2122, 2533, 2533},
- {2396, 2537, 2538},
- {2410, 2547, 2548},
- {1093, 2555, 2560},
- {551, 2564, 2565},
- {2268, 2569, 2569},
- {1362, 2580, 2580},
- {1916, 2584, 2585},
- {994, 2589, 2590},
- {1979, 2596, 2596},
- {1041, 2602, 2602},
- {2104, 2614, 2616},
- {2609, 2621, 2628},
- {2329, 2638, 2638},
- {2211, 2657, 2658},
- {2638, 2662, 2667},
- {2578, 2676, 2679},
- {2153, 2685, 2686},
- {2608, 2696, 2697},
- {598, 2712, 2712},
- {2620, 2719, 2720},
- {1888, 2724, 2728},
- {2709, 2732, 2732},
- {1365, 2739, 2739},
- {784, 2747, 2748},
- {424, 2753, 2753},
- {2204, 2759, 2759},
- {812, 2768, 2769},
- {2455, 2773, 2773},
- {1722, 2781, 2781},
- {1917, 2792, 2792},
- {2705, 2799, 2799},
- {2685, 2806, 2807},
- {2742, 2811, 2811},
- {1370, 2818, 2818},
- {2641, 2830, 2830},
- {2512, 2837, 2837},
- {2457, 2841, 2841},
- {2756, 2845, 2845},
- {2719, 2855, 2855},
- {1423, 2859, 2859},
- {2849, 2863, 2865},
- {1474, 2871, 2871},
- {1161, 2875, 2876},
- {2282, 2880, 2881},
- {2746, 2888, 2888},
- {1783, 2893, 2893},
- {2401, 2899, 2900},
- {2632, 2920, 2923},
- {2422, 2928, 2930},
- {2715, 2939, 2939},
- {2162, 2943, 2943},
- {2859, 2947, 2947},
- {1910, 2951, 2951},
- {1431, 2955, 2956},
- {1439, 2964, 2964},
- {2501, 2968, 2969},
- {2029, 2973, 2976},
- {689, 2983, 2984},
- {1658, 2988, 2988},
- {1031, 2996, 2996},
- {2149, 3001, 3002},
- {25, 3009, 3013},
- {2964, 3023, 3023},
- {953, 3027, 3028},
- {2359, 3036, 3036},
- {3023, 3049, 3049},
- {2880, 3055, 3056},
- {2973, 3076, 3077},
- {2874, 3090, 3090},
- {2871, 3094, 3094},
- {2532, 3100, 3100},
- {2938, 3107, 3108},
- {350, 3115, 3115},
- {2196, 3119, 3121},
- {1133, 3127, 3129},
- {1797, 3134, 3150},
- {3032, 3158, 3158},
- {3016, 3172, 3172},
- {2533, 3179, 3179},
- {3055, 3187, 3188},
- {1384, 3192, 3193},
- {2799, 3199, 3199},
- {2126, 3203, 3207},
- {2334, 3215, 3215},
- {2105, 3220, 3221},
- {3199, 3229, 3229},
- {2891, 3233, 3233},
- {855, 3240, 3240},
- {1852, 3253, 3256},
- {2140, 3263, 3263},
- {1682, 3268, 3270},
- {3243, 3274, 3274},
- {924, 3279, 3279},
- {2212, 3283, 3283},
- {2596, 3287, 3287},
- {2999, 3291, 3291},
- {2353, 3295, 3295},
- {2480, 3302, 3304},
- {1959, 3308, 3311},
- {3000, 3318, 3318},
- {845, 3330, 3330},
- {2283, 3334, 3334},
- {2519, 3342, 3342},
- {3325, 3346, 3348},
- {2397, 3353, 3354},
- {2763, 3358, 3358},
- {3198, 3363, 3364},
- {3211, 3368, 3372},
- {2950, 3376, 3377},
- {3245, 3388, 3391},
- {2264, 3398, 3398},
- {795, 3403, 3403},
- {3287, 3407, 3407},
- {3358, 3411, 3411},
- {3317, 3415, 3415},
- {3232, 3431, 3431},
- {2128, 3435, 3437},
- {3236, 3441, 3441},
- {3398, 3445, 3446},
- {2814, 3450, 3450},
- {3394, 3466, 3466},
- {2425, 3470, 3470},
- {3330, 3476, 3476},
- {1612, 3480, 3480},
- {1004, 3485, 3486},
- {2732, 3490, 3490},
- {1117, 3494, 3495},
- {629, 3501, 3501},
- {3087, 3514, 3514},
- {684, 3518, 3518},
- {3489, 3522, 3524},
- {1760, 3529, 3529},
- {617, 3537, 3537},
- {3431, 3541, 3541},
- {997, 3547, 3547},
- {882, 3552, 3553},
- {2419, 3558, 3558},
- {610, 3562, 3563},
- {1903, 3567, 3569},
- {3005, 3575, 3575},
- {3076, 3585, 3586},
- {3541, 3590, 3590},
- {3490, 3594, 3594},
- {1899, 3599, 3599},
- {3545, 3606, 3606},
- {3290, 3614, 3615},
- {2056, 3619, 3620},
- {3556, 3625, 3625},
- {3294, 3632, 3633},
- {637, 3643, 3644},
- {3609, 3648, 3650},
- {3175, 3658, 3658},
- {3498, 3665, 3665},
- {1597, 3669, 3669},
- {1983, 3673, 3673},
- {3215, 3682, 3682},
- {3544, 3689, 3689},
- {3694, 3698, 3698},
- {3228, 3715, 3716},
- {2594, 3720, 3722},
- {3573, 3726, 3726},
- {2479, 3732, 3735},
- {3191, 3741, 3742},
- {1113, 3746, 3747},
- {2844, 3751, 3751},
- {3445, 3756, 3757},
- {3755, 3766, 3766},
- {3421, 3775, 3780},
- {3593, 3784, 3786},
- {3263, 3796, 3796},
- {3469, 3806, 3806},
- {2602, 3815, 3815},
- {723, 3819, 3821},
- {1608, 3826, 3826},
- {3334, 3830, 3830},
- {2198, 3835, 3835},
- {2635, 3840, 3840},
- {3702, 3852, 3853},
- {3406, 3858, 3859},
- {3681, 3867, 3870},
- {3407, 3880, 3880},
- {340, 3889, 3889},
- {3772, 3893, 3893},
- {593, 3897, 3897},
- {2563, 3914, 3916},
- {2981, 3929, 3929},
- {1835, 3933, 3934},
- {3906, 3951, 3951},
- {1459, 3958, 3958},
- {3889, 3974, 3974},
- {2188, 3982, 3982},
- {3220, 3986, 3987},
- {3585, 3991, 3993},
- {3712, 3997, 4001},
- {2805, 4007, 4007},
- {1879, 4012, 4013},
- {3618, 4018, 4018},
- {1145, 4031, 4032},
- {3901, 4037, 4037},
- {2772, 4046, 4047},
- {2802, 4053, 4054},
- {3299, 4058, 4058},
- {3725, 4066, 4066},
- {2271, 4070, 4070},
- {385, 4075, 4076},
- {3624, 4089, 4090},
- {3745, 4096, 4098},
- {1563, 4102, 4102},
- {4045, 4106, 4111},
- {3696, 4115, 4119},
- {3376, 4125, 4126},
- {1880, 4130, 4130},
- {2048, 4140, 4141},
- {2724, 4149, 4149},
- {1767, 4156, 4156},
- {2601, 4164, 4164},
- {2757, 4168, 4168},
- {3974, 4172, 4172},
- {3914, 4178, 4178},
- {516, 4185, 4185},
- {1032, 4189, 4190},
- {3462, 4197, 4198},
- {3805, 4202, 4203},
- {3910, 4207, 4212},
- {3075, 4221, 4221},
- {3756, 4225, 4226},
- {1872, 4236, 4237},
- {3844, 4241, 4241},
- {3991, 4245, 4249},
- {2203, 4258, 4258},
- {3903, 4267, 4268},
- {705, 4272, 4272},
- {1896, 4276, 4276},
- {1955, 4285, 4288},
- {3746, 4302, 4303},
- {2672, 4311, 4311},
- {3969, 4317, 4317},
- {3883, 4322, 4322},
- {1920, 4339, 4340},
- {3527, 4344, 4346},
- {1160, 4358, 4358},
- {3648, 4364, 4366},
- {2711, 4387, 4387},
- {3619, 4391, 4392},
- {1944, 4396, 4396},
- {4369, 4400, 4400},
- {2736, 4404, 4407},
- {2546, 4411, 4412},
- {4390, 4422, 4422},
- {3610, 4426, 4427},
- {4058, 4431, 4431},
- {4374, 4435, 4435},
- {3463, 4445, 4446},
- {1813, 4452, 4452},
- {3669, 4456, 4456},
- {3830, 4460, 4460},
- {421, 4464, 4465},
- {1719, 4471, 4471},
- {3880, 4475, 4475},
- {1834, 4485, 4487},
- {3590, 4491, 4491},
- {442, 4496, 4497},
- {4435, 4501, 4501},
- {3814, 4509, 4509},
- {987, 4513, 4513},
- {4494, 4518, 4521},
- {3218, 4526, 4529},
- {4221, 4537, 4537},
- {2778, 4543, 4545},
- {4422, 4552, 4552},
- {4031, 4558, 4559},
- {4178, 4563, 4563},
- {3726, 4567, 4574},
- {4027, 4578, 4578},
- {4339, 4585, 4587},
- {3796, 4592, 4595},
- {543, 4600, 4613},
- {2855, 4620, 4621},
- {2795, 4627, 4627},
- {3440, 4631, 4632},
- {4279, 4636, 4639},
- {4245, 4643, 4645},
- {4516, 4649, 4650},
- {3133, 4654, 4654},
- {4042, 4658, 4659},
- {3422, 4663, 4663},
- {4046, 4667, 4668},
- {4267, 4672, 4672},
- {4004, 4676, 4677},
- {2490, 4682, 4682},
- {2451, 4697, 4697},
- {3027, 4705, 4705},
- {4028, 4717, 4717},
- {4460, 4721, 4721},
- {2471, 4725, 4727},
- {3090, 4735, 4735},
- {3192, 4739, 4740},
- {3835, 4760, 4760},
- {4540, 4764, 4764},
- {4007, 4772, 4774},
- {619, 4784, 4784},
- {3561, 4789, 4791},
- {3367, 4805, 4805},
- {4490, 4810, 4811},
- {2402, 4815, 4815},
- {3352, 4819, 4822},
- {2773, 4828, 4828},
- {4552, 4832, 4832},
- {2522, 4840, 4841},
- {316, 4847, 4852},
- {4715, 4858, 4858},
- {2959, 4862, 4862},
- {4858, 4868, 4869},
- {2134, 4873, 4873},
- {578, 4878, 4878},
- {4189, 4889, 4890},
- {2229, 4894, 4894},
- {4501, 4898, 4898},
- {2297, 4903, 4903},
- {2933, 4909, 4909},
- {3008, 4913, 4913},
- {3153, 4917, 4917},
- {4819, 4921, 4921},
- {4921, 4932, 4933},
- {4920, 4944, 4945},
- {4814, 4954, 4955},
- {576, 4966, 4966},
- {1854, 4970, 4971},
- {1374, 4975, 4976},
- {3307, 4980, 4980},
- {974, 4984, 4988},
- {4721, 4992, 4992},
- {4898, 4996, 4996},
- {4475, 5006, 5006},
- {3819, 5012, 5012},
- {1948, 5019, 5021},
- {4954, 5027, 5029},
- {3740, 5038, 5040},
- {4763, 5044, 5045},
- {1936, 5051, 5051},
- {4844, 5055, 5060},
- {4215, 5069, 5072},
- {1146, 5076, 5076},
- {3845, 5082, 5082},
- {4865, 5090, 5090},
- {4624, 5094, 5094},
- {4815, 5098, 5098},
- {5006, 5105, 5105},
- {4980, 5109, 5109},
- {4795, 5113, 5115},
- {5043, 5119, 5121},
- {4782, 5129, 5129},
- {3826, 5139, 5139},
- {3876, 5156, 5156},
- {3111, 5167, 5171},
- {1470, 5177, 5177},
- {4431, 5181, 5181},
- {546, 5189, 5189},
- {4225, 5193, 5193},
- {1672, 5199, 5201},
- {4207, 5205, 5209},
- {4220, 5216, 5217},
- {4658, 5224, 5225},
- {3295, 5235, 5235},
- {2436, 5239, 5239},
- {2349, 5246, 5246},
- {2175, 5250, 5250},
- {5180, 5257, 5258},
- {3161, 5263, 5263},
- {5105, 5272, 5272},
- {3552, 5282, 5282},
- {4944, 5299, 5300},
- {4130, 5312, 5313},
- {902, 5323, 5323},
- {913, 5327, 5327},
- {2987, 5333, 5334},
- {5150, 5344, 5344},
- {5249, 5348, 5348},
- {1965, 5358, 5359},
- {5330, 5364, 5364},
- {2012, 5373, 5377},
- {712, 5384, 5386},
- {5235, 5390, 5390},
- {5044, 5398, 5399},
- {564, 5406, 5406},
- {39, 5410, 5410},
- {4642, 5422, 5425},
- {4421, 5437, 5438},
- {2347, 5449, 5449},
- {5333, 5453, 5454},
- {4136, 5458, 5459},
- {3793, 5468, 5468},
- {2243, 5480, 5480},
- {4889, 5492, 5493},
- {4295, 5504, 5504},
- {2785, 5511, 5511},
- {2377, 5518, 5518},
- {3662, 5525, 5525},
- {5097, 5529, 5530},
- {4781, 5537, 5538},
- {4697, 5547, 5548},
- {436, 5552, 5553},
- {5542, 5558, 5558},
- {3692, 5562, 5562},
- {2696, 5568, 5569},
- {4620, 5578, 5578},
- {2898, 5590, 5590},
- {5557, 5596, 5618},
- {2797, 5623, 5625},
- {2792, 5629, 5629},
- {5243, 5633, 5633},
- {5348, 5637, 5637},
- {5547, 5643, 5643},
- {4296, 5654, 5655},
- {5568, 5662, 5662},
- {3001, 5670, 5671},
- {3794, 5679, 5679},
- {4006, 5685, 5686},
- {4969, 5690, 5692},
- {687, 5704, 5704},
- {4563, 5708, 5708},
- {1723, 5738, 5738},
- {649, 5742, 5742},
- {5163, 5748, 5755},
- {3907, 5759, 5759},
- {3074, 5764, 5764},
- {5326, 5771, 5771},
- {2951, 5776, 5776},
- {5181, 5780, 5780},
- {2614, 5785, 5788},
- {4709, 5794, 5794},
- {2784, 5799, 5799},
- {5518, 5803, 5803},
- {4155, 5812, 5815},
- {921, 5819, 5819},
- {5224, 5823, 5824},
- {2853, 5830, 5836},
- {5776, 5840, 5840},
- {2955, 5844, 5845},
- {5745, 5853, 5853},
- {3291, 5857, 5857},
- {2988, 5861, 5861},
- {2647, 5865, 5865},
- {5398, 5869, 5870},
- {1085, 5874, 5875},
- {4906, 5881, 5881},
- {802, 5886, 5886},
- {5119, 5890, 5893},
- {5802, 5899, 5900},
- {3415, 5904, 5904},
- {5629, 5908, 5908},
- {3714, 5912, 5914},
- {5558, 5921, 5921},
- {2710, 5927, 5928},
- {1094, 5932, 5934},
- {2653, 5940, 5941},
- {4735, 5954, 5954},
- {5861, 5958, 5958},
- {1040, 5971, 5971},
- {5514, 5977, 5977},
- {5048, 5981, 5982},
- {5953, 5992, 5993},
- {3751, 5997, 5997},
- {4991, 6001, 6002},
- {5885, 6006, 6007},
- {5529, 6011, 6012},
- {4974, 6019, 6020},
- {5857, 6024, 6024},
- {3483, 6032, 6032},
- {3594, 6036, 6036},
- {1997, 6040, 6040},
- {5997, 6044, 6047},
- {5197, 6051, 6051},
- {1764, 6055, 6055},
- {6050, 6059, 6059},
- {5239, 6063, 6063},
- {5049, 6067, 6067},
- {5957, 6073, 6074},
- {1022, 6078, 6078},
- {3414, 6083, 6084},
- {3809, 6090, 6090},
- {4562, 6095, 6096},
- {5878, 6104, 6104},
- {594, 6108, 6109},
- {3353, 6115, 6116},
- {4992, 6120, 6121},
- {2424, 6125, 6125},
- {4484, 6130, 6130},
- {3900, 6134, 6135},
- {5793, 6139, 6141},
- {3562, 6145, 6145},
- {1438, 6152, 6153},
- {6058, 6157, 6158},
- {4411, 6162, 6163},
- {4590, 6167, 6171},
- {4748, 6175, 6175},
- {5517, 6183, 6184},
- {6095, 6191, 6192},
- {1471, 6203, 6203},
- {2643, 6209, 6210},
- {450, 6220, 6220},
- {5266, 6226, 6226},
- {2576, 6233, 6233},
- {2607, 6239, 6240},
- {5164, 6244, 6251},
- {6054, 6255, 6255},
- {1789, 6260, 6261},
- {5250, 6265, 6265},
- {6062, 6273, 6278},
- {5990, 6282, 6282},
- {3283, 6286, 6286},
- {5436, 6290, 6290},
- {6059, 6294, 6294},
- {5668, 6298, 6300},
- {3072, 6324, 6329},
- {3132, 6338, 6339},
- {3246, 6343, 6344},
- {28, 6348, 6349},
- {1503, 6353, 6355},
- {6067, 6359, 6359},
- {3384, 6364, 6364},
- {545, 6375, 6376},
- {5803, 6380, 6380},
- {5522, 6384, 6385},
- {5908, 6389, 6389},
- {2796, 6393, 6396},
- {4831, 6403, 6404},
- {6388, 6412, 6412},
- {6005, 6417, 6420},
- {4450, 6430, 6430},
- {4050, 6435, 6435},
- {5372, 6441, 6441},
- {4378, 6447, 6447},
- {6199, 6452, 6452},
- {3026, 6456, 6456},
- {2642, 6460, 6462},
- {6392, 6470, 6470},
- {6459, 6474, 6474},
- {2829, 6487, 6488},
- {2942, 6499, 6504},
- {5069, 6508, 6511},
- {5341, 6515, 6516},
- {5853, 6521, 6525},
- {6104, 6531, 6531},
- {5759, 6535, 6538},
- {4672, 6542, 6543},
- {2443, 6550, 6550},
- {5109, 6554, 6554},
- {6494, 6558, 6560},
- {6006, 6570, 6572},
- {6424, 6576, 6580},
- {4693, 6591, 6592},
- {6439, 6596, 6597},
- {3179, 6601, 6601},
- {5299, 6606, 6607},
- {4148, 6612, 6613},
- {3774, 6617, 6617},
- {3537, 6623, 6624},
- {4975, 6628, 6629},
- {3848, 6636, 6636},
- {856, 6640, 6640},
- {5724, 6645, 6645},
- {6632, 6651, 6651},
- {4630, 6656, 6658},
- {1440, 6662, 6662},
- {4281, 6666, 6667},
- {4302, 6671, 6672},
- {2589, 6676, 6677},
- {5647, 6681, 6687},
- {6082, 6691, 6693},
- {6144, 6698, 6698},
- {6103, 6709, 6710},
- {3710, 6714, 6714},
- {4253, 6718, 6721},
- {2467, 6730, 6730},
- {4778, 6734, 6734},
- {6528, 6738, 6738},
- {4358, 6747, 6747},
- {5889, 6753, 6753},
- {5193, 6757, 6757},
- {5797, 6761, 6761},
- {3858, 6765, 6766},
- {5951, 6776, 6776},
- {6487, 6781, 6782},
- {3282, 6786, 6787},
- {4667, 6797, 6799},
- {1927, 6803, 6806},
- {6583, 6810, 6810},
- {4937, 6814, 6814},
- {6099, 6824, 6824},
- {4415, 6835, 6836},
- {6332, 6840, 6841},
- {5160, 6850, 6850},
- {4764, 6854, 6854},
- {6814, 6858, 6859},
- {3018, 6864, 6864},
- {6293, 6868, 6869},
- {6359, 6877, 6877},
- {3047, 6884, 6886},
- {5262, 6890, 6891},
- {5471, 6900, 6900},
- {3268, 6910, 6912},
- {1047, 6916, 6916},
- {5904, 6923, 6923},
- {5798, 6933, 6938},
- {4149, 6942, 6942},
- {1821, 6946, 6946},
- {3599, 6952, 6952},
- {6470, 6957, 6957},
- {5562, 6961, 6961},
- {6268, 6965, 6967},
- {6389, 6971, 6971},
- {6596, 6975, 6976},
- {6553, 6980, 6981},
- {6576, 6985, 6989},
- {1375, 6993, 6993},
- {652, 6998, 6998},
- {4876, 7002, 7003},
- {5768, 7011, 7013},
- {3973, 7017, 7017},
- {6802, 7025, 7025},
- {6955, 7034, 7036},
- {6974, 7040, 7040},
- {5944, 7044, 7044},
- {6992, 7048, 7054},
- {6872, 7059, 7059},
- {2943, 7063, 7063},
- {6923, 7067, 7067},
- {5094, 7071, 7071},
- {4873, 7075, 7075},
- {5819, 7079, 7079},
- {5945, 7085, 7085},
- {1540, 7090, 7091},
- {2090, 7095, 7095},
- {5024, 7104, 7105},
- {6900, 7109, 7109},
- {6024, 7113, 7114},
- {6000, 7118, 7120},
- {2187, 7124, 7125},
- {6760, 7129, 7130},
- {5898, 7134, 7136},
- {7032, 7144, 7144},
- {4271, 7148, 7148},
- {3706, 7152, 7152},
- {6970, 7156, 7157},
- {7088, 7161, 7163},
- {2718, 7168, 7169},
- {5674, 7175, 7175},
- {4631, 7182, 7182},
- {7070, 7188, 7189},
- {6220, 7196, 7196},
- {3458, 7201, 7202},
- {2041, 7211, 7212},
- {1454, 7216, 7216},
- {5199, 7225, 7227},
- {3529, 7234, 7234},
- {6890, 7238, 7238},
- {3815, 7242, 7243},
- {5490, 7250, 7253},
- {6554, 7257, 7263},
- {5890, 7267, 7269},
- {6877, 7273, 7273},
- {4877, 7277, 7277},
- {2502, 7285, 7285},
- {1483, 7289, 7295},
- {7210, 7304, 7308},
- {6845, 7313, 7316},
- {7219, 7320, 7320},
- {7001, 7325, 7329},
- {6853, 7333, 7334},
- {6120, 7338, 7338},
- {6606, 7342, 7343},
- {7020, 7348, 7350},
- {3509, 7354, 7354},
- {7133, 7359, 7363},
- {3434, 7371, 7374},
- {2787, 7384, 7384},
- {7044, 7388, 7388},
- {6960, 7394, 7395},
- {6676, 7399, 7400},
- {7161, 7404, 7404},
- {7285, 7417, 7418},
- {4558, 7425, 7426},
- {4828, 7430, 7430},
- {6063, 7436, 7436},
- {3597, 7442, 7442},
- {914, 7446, 7446},
- {7320, 7452, 7454},
- {7267, 7458, 7460},
- {5076, 7464, 7464},
- {7430, 7468, 7469},
- {6273, 7473, 7474},
- {7440, 7478, 7487},
- {7348, 7491, 7494},
- {1021, 7510, 7510},
- {7473, 7515, 7515},
- {2823, 7519, 7519},
- {6264, 7527, 7527},
- {7302, 7531, 7531},
- {7089, 7535, 7535},
- {7342, 7540, 7541},
- {3688, 7547, 7551},
- {3054, 7558, 7560},
- {4177, 7566, 7567},
- {6691, 7574, 7575},
- {7156, 7585, 7586},
- {7147, 7590, 7592},
- {7407, 7598, 7598},
- {7403, 7602, 7603},
- {6868, 7607, 7607},
- {6636, 7611, 7611},
- {4805, 7617, 7617},
- {5779, 7623, 7623},
- {7063, 7627, 7627},
- {5079, 7632, 7632},
- {7377, 7637, 7637},
- {7337, 7641, 7642},
- {6738, 7655, 7655},
- {7338, 7659, 7659},
- {6541, 7669, 7671},
- {595, 7675, 7675},
- {7658, 7679, 7680},
- {7647, 7685, 7686},
- {2477, 7690, 7690},
- {5823, 7694, 7694},
- {4156, 7699, 7699},
- {5931, 7703, 7706},
- {6854, 7712, 7712},
- {4931, 7718, 7718},
- {6979, 7722, 7722},
- {5085, 7727, 7727},
- {6965, 7732, 7732},
- {7201, 7736, 7737},
- {3639, 7741, 7743},
- {7534, 7749, 7749},
- {4292, 7753, 7753},
- {3427, 7759, 7763},
- {7273, 7767, 7767},
- {940, 7778, 7778},
- {4838, 7782, 7785},
- {4216, 7790, 7792},
- {922, 7800, 7801},
- {7256, 7810, 7811},
- {7789, 7815, 7819},
- {7225, 7823, 7825},
- {7531, 7829, 7829},
- {6997, 7833, 7833},
- {7757, 7837, 7838},
- {4129, 7842, 7842},
- {7333, 7848, 7849},
- {6776, 7855, 7855},
- {7527, 7859, 7859},
- {4370, 7863, 7863},
- {4512, 7868, 7868},
- {5679, 7880, 7880},
- {3162, 7884, 7885},
- {3933, 7892, 7894},
- {7804, 7899, 7902},
- {6363, 7906, 7907},
- {7848, 7911, 7912},
- {5584, 7917, 7921},
- {874, 7926, 7926},
- {3342, 7930, 7930},
- {4507, 7935, 7937},
- {3672, 7943, 7944},
- {7911, 7948, 7949},
- {6402, 7956, 7956},
- {7940, 7960, 7960},
- {7113, 7964, 7964},
- {1073, 7968, 7968},
- {7740, 7974, 7974},
- {7601, 7978, 7982},
- {6797, 7987, 7988},
- {3528, 7994, 7995},
- {5483, 7999, 7999},
- {5717, 8011, 8011},
- {5480, 8017, 8017},
- {7770, 8023, 8030},
- {2452, 8034, 8034},
- {5282, 8047, 8047},
- {7967, 8051, 8051},
- {1128, 8058, 8066},
- {6348, 8070, 8070},
- {8055, 8077, 8077},
- {7925, 8081, 8086},
- {6810, 8090, 8090},
- {5051, 8101, 8101},
- {4696, 8109, 8110},
- {5129, 8119, 8119},
- {4449, 8123, 8123},
- {7222, 8127, 8127},
- {4649, 8131, 8134},
- {7994, 8138, 8138},
- {5954, 8148, 8148},
- {475, 8152, 8153},
- {7906, 8157, 8157},
- {7458, 8164, 8166},
- {7632, 8171, 8173},
- {3874, 8177, 8183},
- {4391, 8187, 8187},
- {561, 8191, 8191},
- {2417, 8195, 8195},
- {2357, 8204, 8204},
- {2269, 8216, 8218},
- {3968, 8222, 8222},
- {2200, 8226, 8227},
- {3453, 8247, 8247},
- {2439, 8251, 8252},
- {7175, 8257, 8257},
- {976, 8262, 8264},
- {4953, 8273, 8273},
- {4219, 8278, 8278},
- {6, 8285, 8291},
- {5703, 8295, 8296},
- {5272, 8300, 8300},
- {8037, 8304, 8304},
- {8186, 8314, 8314},
- {8304, 8318, 8318},
- {8051, 8326, 8326},
- {8318, 8330, 8330},
- {2671, 8334, 8335},
- {2662, 8339, 8339},
- {8081, 8349, 8350},
- {3328, 8356, 8356},
- {2879, 8360, 8362},
- {8050, 8370, 8371},
- {8330, 8375, 8376},
- {8375, 8386, 8386},
- {4961, 8390, 8390},
- {1017, 8403, 8405},
- {3533, 8416, 8416},
- {4555, 8422, 8422},
- {6445, 8426, 8426},
- {8169, 8432, 8432},
- {990, 8436, 8436},
- {4102, 8440, 8440},
- {7398, 8444, 8446},
- {3480, 8450, 8450},
- {6324, 8462, 8462},
- {7948, 8466, 8467},
- {5950, 8471, 8471},
- {5189, 8476, 8476},
- {4026, 8490, 8490},
- {8374, 8494, 8495},
- {4682, 8501, 8501},
- {7387, 8506, 8506},
- {8164, 8510, 8515},
- {4079, 8524, 8524},
- {8360, 8529, 8531},
- {7446, 8540, 8543},
- {7971, 8547, 8548},
- {4311, 8552, 8552},
- {5204, 8556, 8557},
- {7968, 8562, 8562},
- {7847, 8571, 8573},
- {8547, 8577, 8577},
- {5320, 8581, 8581},
- {8556, 8585, 8586},
- {8504, 8590, 8590},
- {7669, 8602, 8604},
- {5874, 8608, 8609},
- {5828, 8613, 8613},
- {7998, 8617, 8617},
- {8519, 8625, 8625},
- {7250, 8637, 8637},
- {426, 8641, 8641},
- {8436, 8645, 8645},
- {5986, 8649, 8656},
- {8157, 8660, 8660},
- {7182, 8665, 8665},
- {8421, 8675, 8675},
- {8509, 8681, 8681},
- {5137, 8688, 8689},
- {8625, 8694, 8695},
- {5228, 8701, 8702},
- {6661, 8714, 8714},
- {1010, 8719, 8719},
- {6648, 8723, 8723},
- {3500, 8728, 8728},
- {2442, 8735, 8735},
- {8494, 8740, 8741},
- {8171, 8753, 8755},
- {7242, 8763, 8764},
- {4739, 8768, 8769},
- {7079, 8773, 8773},
- {8386, 8777, 8777},
- {8624, 8781, 8787},
- {661, 8791, 8794},
- {8631, 8801, 8801},
- {7753, 8805, 8805},
- {4783, 8809, 8810},
- {1673, 8814, 8815},
- {6623, 8819, 8819},
- {4404, 8823, 8823},
- {8089, 8827, 8828},
- {8773, 8832, 8832},
- {5394, 8836, 8836},
- {6231, 8841, 8843},
- {1015, 8852, 8853},
- {6873, 8857, 8857},
- {6289, 8865, 8865},
- {8577, 8869, 8869},
- {8114, 8873, 8875},
- {8534, 8883, 8883},
- {3007, 8887, 8888},
- {8827, 8892, 8893},
- {4788, 8897, 8900},
- {5698, 8906, 8907},
- {7690, 8911, 8911},
- {6643, 8919, 8919},
- {7206, 8923, 8924},
- {7866, 8929, 8931},
- {8880, 8942, 8942},
- {8630, 8951, 8952},
- {6027, 8958, 8958},
- {7749, 8966, 8967},
- {4932, 8972, 8973},
- {8892, 8980, 8981},
- {634, 9003, 9003},
- {8109, 9007, 9008},
- {8777, 9012, 9012},
- {3981, 9016, 9017},
- {5723, 9025, 9025},
- {7662, 9034, 9038},
- {8955, 9042, 9042},
- {8070, 9060, 9062},
- {8910, 9066, 9066},
- {5363, 9070, 9071},
- {7699, 9075, 9076},
- {8991, 9081, 9081},
- {6850, 9085, 9085},
- {5811, 9092, 9094},
- {9079, 9098, 9102},
- {6456, 9106, 9106},
- {2259, 9111, 9111},
- {4752, 9116, 9116},
- {9060, 9120, 9123},
- {8090, 9127, 9127},
- {5305, 9131, 9132},
- {8623, 9137, 9137},
- {7417, 9141, 9141},
- {6564, 9148, 9149},
- {9126, 9157, 9158},
- {4285, 9169, 9170},
- {8698, 9174, 9174},
- {8869, 9178, 9178},
- {2572, 9182, 9183},
- {6482, 9188, 9190},
- {9181, 9201, 9201},
- {2968, 9208, 9209},
- {2506, 9213, 9215},
- {9127, 9219, 9219},
- {7910, 9225, 9227},
- {5422, 9235, 9239},
- {8813, 9244, 9246},
- {9178, 9250, 9250},
- {8748, 9255, 9255},
- {7354, 9265, 9265},
- {7767, 9269, 9269},
- {7710, 9281, 9283},
- {8826, 9288, 9290},
- {861, 9295, 9295},
- {4482, 9301, 9301},
- {9264, 9305, 9306},
- {8805, 9310, 9310},
- {4995, 9314, 9314},
- {6730, 9318, 9318},
- {7457, 9328, 9328},
- {2547, 9335, 9336},
- {6298, 9340, 9343},
- {9305, 9353, 9354},
- {9269, 9358, 9358},
- {6338, 9370, 9370},
- {7289, 9376, 9379},
- {5780, 9383, 9383},
- {7607, 9387, 9387},
- {2065, 9392, 9392},
- {7238, 9396, 9396},
- {8856, 9400, 9400},
- {8069, 9412, 9413},
- {611, 9420, 9420},
- {7071, 9424, 9424},
- {3089, 9430, 9431},
- {7117, 9435, 9438},
- {1976, 9445, 9445},
- {6640, 9449, 9449},
- {5488, 9453, 9453},
- {8739, 9457, 9459},
- {5958, 9466, 9466},
- {7985, 9470, 9470},
- {8735, 9475, 9475},
- {5009, 9479, 9479},
- {8073, 9483, 9484},
- {2328, 9490, 9491},
- {9250, 9495, 9495},
- {4043, 9502, 9502},
- {7712, 9506, 9506},
- {9012, 9510, 9510},
- {9028, 9514, 9515},
- {2190, 9521, 9524},
- {9029, 9528, 9528},
- {9519, 9532, 9532},
- {9495, 9536, 9536},
- {8527, 9540, 9540},
- {2137, 9550, 9550},
- {8419, 9557, 9557},
- {9383, 9561, 9562},
- {8970, 9575, 9578},
- {8911, 9582, 9582},
- {7828, 9595, 9596},
- {6180, 9600, 9600},
- {8738, 9604, 9607},
- {7540, 9611, 9612},
- {9599, 9616, 9618},
- {9187, 9623, 9623},
- {9294, 9628, 9629},
- {4536, 9639, 9639},
- {3867, 9643, 9643},
- {6305, 9648, 9648},
- {1617, 9654, 9657},
- {5762, 9666, 9666},
- {8314, 9670, 9670},
- {9666, 9674, 9675},
- {9506, 9679, 9679},
- {9669, 9685, 9686},
- {9683, 9690, 9690},
- {8763, 9697, 9698},
- {7468, 9702, 9702},
- {460, 9707, 9707},
- {3115, 9712, 9712},
- {9424, 9716, 9717},
- {7359, 9721, 9724},
- {7547, 9728, 9729},
- {7151, 9733, 9738},
- {7627, 9742, 9742},
- {2822, 9747, 9747},
- {8247, 9751, 9753},
- {9550, 9758, 9758},
- {7585, 9762, 9763},
- {1002, 9767, 9767},
- {7168, 9772, 9773},
- {6941, 9777, 9780},
- {9728, 9784, 9786},
- {9770, 9792, 9796},
- {6411, 9801, 9802},
- {3689, 9806, 9808},
- {9575, 9814, 9816},
- {7025, 9820, 9821},
- {2776, 9826, 9826},
- {9806, 9830, 9830},
- {9820, 9834, 9835},
- {9800, 9839, 9847},
- {9834, 9851, 9852},
- {9829, 9856, 9862},
- {1400, 9866, 9866},
- {3197, 9870, 9871},
- {9851, 9875, 9876},
- {9742, 9883, 9884},
- {3362, 9888, 9889},
- {9883, 9893, 9893},
- {5711, 9899, 9910},
- {7806, 9915, 9915},
- {9120, 9919, 9919},
- {9715, 9925, 9934},
- {2580, 9938, 9938},
- {4907, 9942, 9944},
- {6239, 9953, 9954},
- {6961, 9963, 9963},
- {5295, 9967, 9968},
- {1915, 9972, 9973},
- {3426, 9983, 9985},
- {9875, 9994, 9995},
- {6942, 9999, 9999},
- {6621, 10005, 10005},
- {7589, 10010, 10012},
- {9286, 10020, 10020},
- {838, 10024, 10024},
- {9980, 10028, 10031},
- {9994, 10035, 10041},
- {2702, 10048, 10051},
- {2621, 10059, 10059},
- {10054, 10065, 10065},
- {8612, 10073, 10074},
- {7033, 10078, 10078},
- {916, 10082, 10082},
- {10035, 10086, 10087},
- {8613, 10097, 10097},
- {9919, 10107, 10108},
- {6133, 10114, 10115},
- {10059, 10119, 10119},
- {10065, 10126, 10127},
- {7732, 10131, 10131},
- {7155, 10135, 10136},
- {6728, 10140, 10140},
- {6162, 10144, 10145},
- {4724, 10150, 10150},
- {1665, 10154, 10154},
- {10126, 10163, 10163},
- {9783, 10168, 10168},
- {1715, 10172, 10173},
- {7152, 10177, 10182},
- {8760, 10187, 10187},
- {7829, 10191, 10191},
- {9679, 10196, 10196},
- {9369, 10201, 10201},
- {2928, 10206, 10208},
- {6951, 10214, 10217},
- {5633, 10221, 10221},
- {7199, 10225, 10225},
- {10118, 10230, 10231},
- {9999, 10235, 10236},
- {10045, 10240, 10249},
- {5565, 10256, 10256},
- {9866, 10261, 10261},
- {10163, 10268, 10268},
- {9869, 10272, 10272},
- {9789, 10276, 10283},
- {10235, 10287, 10288},
- {10214, 10298, 10299},
- {6971, 10303, 10303},
- {3346, 10307, 10307},
- {10185, 10311, 10312},
- {9993, 10318, 10320},
- {2779, 10332, 10334},
- {1726, 10338, 10338},
- {741, 10354, 10360},
- {10230, 10372, 10373},
- {10260, 10384, 10385},
- {10131, 10389, 10398},
- {6946, 10406, 10409},
- {10158, 10413, 10420},
- {10123, 10424, 10424},
- {6157, 10428, 10429},
- {4518, 10434, 10434},
- {9893, 10438, 10438},
- {9865, 10442, 10446},
- {7558, 10454, 10454},
- {10434, 10460, 10460},
- {10064, 10466, 10468},
- {2703, 10472, 10474},
- {9751, 10478, 10479},
- {6714, 10485, 10485},
- {8020, 10490, 10490},
- {10303, 10494, 10494},
- {3521, 10499, 10500},
- {9281, 10513, 10515},
- {6028, 10519, 10523},
- {9387, 10527, 10527},
- {7614, 10531, 10531},
- {3611, 10536, 10536},
- {9162, 10540, 10540},
- {10081, 10546, 10547},
- {10034, 10560, 10562},
- {6726, 10567, 10571},
- {8237, 10575, 10575},
- {10438, 10579, 10583},
- {10140, 10587, 10587},
- {5784, 10592, 10592},
- {9819, 10597, 10600},
- {10567, 10604, 10608},
- {9335, 10613, 10613},
- {8300, 10617, 10617},
- {10575, 10621, 10621},
- {9678, 10625, 10626},
- {9962, 10632, 10633},
- {10535, 10637, 10638},
- {8199, 10642, 10642},
- {10372, 10647, 10648},
- {10637, 10656, 10657},
- {10579, 10667, 10668},
- {10465, 10677, 10680},
- {6702, 10684, 10685},
- {10073, 10691, 10692},
- {4505, 10696, 10697},
- {9042, 10701, 10701},
- {6460, 10705, 10706},
- {10010, 10714, 10716},
- {10656, 10720, 10722},
- {7282, 10727, 10729},
- {2327, 10733, 10733},
- {2491, 10740, 10741},
- {10704, 10748, 10750},
- {6465, 10754, 10754},
- {10647, 10758, 10759},
- {10424, 10763, 10763},
- {10748, 10776, 10776},
- {10546, 10780, 10781},
- {10758, 10785, 10786},
- {10287, 10790, 10797},
- {10785, 10801, 10807},
- {10240, 10811, 10826},
- {9509, 10830, 10830},
- {2579, 10836, 10838},
- {9801, 10843, 10845},
- {7555, 10849, 10850},
- {10776, 10860, 10865},
- {8023, 10869, 10869},
- {10046, 10876, 10884},
- {10253, 10888, 10892},
- {9941, 10897, 10897},
- {7898, 10901, 10905},
- {6725, 10909, 10913},
- {10757, 10921, 10923},
- {10160, 10931, 10931},
- {10916, 10935, 10942},
- {10261, 10946, 10946},
- {10318, 10952, 10954},
- {5911, 10959, 10961},
- {10801, 10965, 10966},
- {10946, 10970, 10977},
- {10592, 10982, 10984},
- {9913, 10988, 10990},
- {8510, 10994, 10996},
- {9419, 11000, 11001},
- {6765, 11006, 11007},
- {10725, 11011, 11011},
- {5537, 11017, 11019},
- {9208, 11024, 11025},
- {5850, 11030, 11030},
- {9610, 11034, 11036},
- {8846, 11041, 11047},
- {9697, 11051, 11051},
- {1622, 11055, 11058},
- {2370, 11062, 11062},
- {8393, 11067, 11067},
- {9756, 11071, 11071},
- {10172, 11076, 11076},
- {27, 11081, 11081},
- {7357, 11087, 11092},
- {8151, 11104, 11106},
- {6115, 11110, 11110},
- {10667, 11114, 11115},
- {11099, 11121, 11123},
- {10705, 11127, 11127},
- {8938, 11131, 11131},
- {11114, 11135, 11136},
- {1390, 11140, 11141},
- {10964, 11146, 11148},
- {11140, 11152, 11155},
- {9813, 11159, 11166},
- {624, 11171, 11172},
- {3118, 11177, 11179},
- {11029, 11184, 11186},
- {10186, 11190, 11190},
- {10306, 11196, 11196},
- {8665, 11201, 11201},
- {7382, 11205, 11205},
- {1100, 11210, 11210},
- {2337, 11216, 11217},
- {1609, 11221, 11223},
- {5763, 11228, 11229},
- {5220, 11233, 11233},
- {11061, 11241, 11241},
- {10617, 11246, 11246},
- {11190, 11250, 11251},
- {10144, 11255, 11256},
- {11232, 11260, 11260},
- {857, 11264, 11265},
- {10994, 11269, 11271},
- {3879, 11280, 11281},
- {11184, 11287, 11289},
- {9611, 11293, 11295},
- {11250, 11299, 11299},
- {4495, 11304, 11304},
- {7574, 11308, 11309},
- {9814, 11315, 11317},
- {1713, 11321, 11324},
- {1905, 11328, 11328},
- {8745, 11335, 11340},
- {8883, 11351, 11351},
- {8119, 11358, 11358},
- {1842, 11363, 11364},
- {11237, 11368, 11368},
- {8814, 11373, 11374},
- {5684, 11378, 11378},
- {11011, 11382, 11382},
- {6520, 11389, 11389},
- {11183, 11393, 11396},
- {1790, 11404, 11404},
- {9536, 11408, 11408},
- {11298, 11418, 11419},
- {3929, 11425, 11425},
- {5588, 11429, 11429},
- {8476, 11436, 11436},
- {4096, 11440, 11442},
- {11084, 11446, 11454},
- {10603, 11458, 11463},
- {7332, 11472, 11474},
- {7611, 11483, 11486},
- {4836, 11490, 11491},
- {10024, 11495, 11495},
- {4917, 11501, 11506},
- {6486, 11510, 11512},
- {11269, 11516, 11518},
- {3603, 11522, 11525},
- {11126, 11535, 11535},
- {11418, 11539, 11541},
- {11408, 11545, 11545},
- {9021, 11549, 11552},
- {6745, 11557, 11557},
- {5118, 11561, 11564},
- {7590, 11568, 11569},
- {4426, 11573, 11578},
- {9790, 11582, 11583},
- {6447, 11587, 11587},
- {10229, 11591, 11594},
- {10457, 11598, 11598},
- {10168, 11604, 11604},
- {10543, 11608, 11608},
- {7404, 11612, 11612},
- {11127, 11616, 11616},
- {3337, 11620, 11620},
- {11501, 11624, 11628},
- {4543, 11633, 11635},
- {8449, 11642, 11642},
- {4943, 11646, 11648},
- {10526, 11652, 11654},
- {11620, 11659, 11659},
- {8927, 11664, 11669},
- {532, 11673, 11673},
- {10513, 11677, 11679},
- {10428, 11683, 11683},
- {10999, 11689, 11690},
- {9469, 11695, 11695},
- {3606, 11699, 11699},
- {9560, 11708, 11709},
- {1564, 11714, 11714},
- {10527, 11718, 11718},
- {3071, 11723, 11726},
- {11590, 11731, 11732},
- {6605, 11737, 11737},
- {11624, 11741, 11745},
- {7822, 11749, 11752},
- {5269, 11757, 11758},
- {1339, 11767, 11767},
- {1363, 11771, 11773},
- {3704, 11777, 11777},
- {10952, 11781, 11783},
- {6764, 11793, 11795},
- {8675, 11800, 11800},
- {9963, 11804, 11804},
- {11573, 11808, 11809},
- {9548, 11813, 11813},
- {11591, 11817, 11818},
- {11446, 11822, 11822},
- {9224, 11828, 11828},
- {3158, 11836, 11836},
- {10830, 11840, 11840},
- {7234, 11846, 11846},
- {11299, 11850, 11850},
- {11544, 11854, 11855},
- {11498, 11859, 11859},
- {10993, 11865, 11868},
- {9720, 11872, 11878},
- {10489, 11882, 11890},
- {11712, 11898, 11904},
- {11516, 11908, 11910},
- {11568, 11914, 11915},
- {10177, 11919, 11924},
- {11363, 11928, 11929},
- {10494, 11933, 11933},
- {9870, 11937, 11938},
- {9427, 11942, 11942},
- {11481, 11949, 11949},
- {6030, 11955, 11957},
- {11718, 11961, 11961},
- {10531, 11965, 11983},
- {5126, 11987, 11987},
- {7515, 11991, 11991},
- {10646, 11996, 11997},
- {2947, 12001, 12001},
- {9582, 12009, 12010},
- {6202, 12017, 12018},
- {11714, 12022, 12022},
- {9235, 12033, 12037},
- {9721, 12041, 12044},
- {11932, 12051, 12052},
- {12040, 12056, 12056},
- {12051, 12060, 12060},
- {11601, 12066, 12066},
- {8426, 12070, 12070},
- {4053, 12077, 12077},
- {4262, 12081, 12081},
- {9761, 12086, 12088},
- {11582, 12092, 12093},
- {10965, 12097, 12098},
- {11803, 12103, 12104},
- {11933, 12108, 12109},
- {10688, 12117, 12117},
- {12107, 12125, 12126},
- {6774, 12130, 12132},
- {6286, 12137, 12137},
- {9543, 12141, 12141},
- {12097, 12145, 12146},
- {10790, 12150, 12150},
- {10125, 12154, 12156},
- {12125, 12164, 12164},
- {12064, 12168, 12172},
- {10811, 12178, 12188},
- {12092, 12192, 12193},
- {10058, 12197, 12198},
- {11611, 12211, 12212},
- {3459, 12216, 12216},
- {10291, 12225, 12228},
- {12191, 12232, 12234},
- {12145, 12238, 12238},
- {12001, 12242, 12250},
- {3840, 12255, 12255},
- {12216, 12259, 12259},
- {674, 12272, 12272},
- {12141, 12276, 12276},
- {10766, 12280, 12280},
- {11545, 12284, 12284},
- {6496, 12290, 12290},
- {11381, 12294, 12295},
- {603, 12302, 12303},
- {12276, 12308, 12308},
- {11850, 12313, 12314},
- {565, 12319, 12319},
- {9351, 12324, 12324},
- {11822, 12328, 12328},
- {2691, 12333, 12334},
- {11840, 12338, 12338},
- {11070, 12343, 12343},
- {9510, 12347, 12347},
- {11024, 12352, 12353},
- {7173, 12359, 12359},
- {517, 12363, 12363},
- {6311, 12367, 12368},
- {11367, 12372, 12373},
- {12008, 12377, 12377},
- {11372, 12382, 12384},
- {11358, 12391, 12392},
- {11382, 12396, 12396},
- {6882, 12400, 12401},
- {11246, 12405, 12405},
- {8359, 12409, 12412},
- {10154, 12418, 12418},
- {12016, 12425, 12426},
- {8972, 12434, 12435},
- {10478, 12439, 12440},
- {12395, 12449, 12449},
- {11612, 12454, 12454},
- {12347, 12458, 12458},
- {10700, 12466, 12467},
- {3637, 12471, 12476},
- {1042, 12480, 12481},
- {6747, 12488, 12488},
- {12396, 12492, 12493},
- {9420, 12497, 12497},
- {11285, 12501, 12510},
- {4470, 12515, 12515},
- {9374, 12519, 12519},
- {11293, 12528, 12528},
- {2058, 12534, 12535},
- {6521, 12539, 12539},
- {12492, 12543, 12543},
- {3043, 12547, 12547},
- {2982, 12551, 12553},
- {11030, 12557, 12563},
- {7636, 12568, 12568},
- {9639, 12572, 12572},
- {12543, 12576, 12576},
- {5989, 12580, 12583},
- {11051, 12587, 12587},
- {1061, 12592, 12594},
- {12313, 12599, 12601},
- {11846, 12605, 12605},
- {12576, 12609, 12609},
- {11040, 12618, 12625},
- {12479, 12629, 12629},
- {6903, 12633, 12633},
- {12322, 12639, 12639},
- {12253, 12643, 12645},
- {5594, 12651, 12651},
- {12522, 12655, 12655},
- {11703, 12659, 12659},
- {1377, 12665, 12665},
- {8022, 12669, 12669},
- {12280, 12674, 12674},
- {9023, 12680, 12681},
- {12328, 12685, 12685},
- {3085, 12689, 12693},
- {4700, 12698, 12698},
- {10224, 12702, 12702},
- {8781, 12706, 12706},
- {1651, 12710, 12710},
- {12458, 12714, 12714},
- {12005, 12718, 12721},
- {11908, 12725, 12726},
- {8202, 12733, 12733},
- {11708, 12739, 12740},
- {12599, 12744, 12745},
- {12284, 12749, 12749},
- {5285, 12756, 12756},
- {12055, 12775, 12777},
- {6919, 12782, 12782},
- {12242, 12786, 12786},
- {12009, 12790, 12790},
- {9628, 12794, 12796},
- {11354, 12801, 12802},
- {10225, 12806, 12807},
- {579, 12813, 12813},
- {8935, 12817, 12822},
- {8753, 12827, 12829},
- {11006, 12835, 12835},
- {858, 12841, 12845},
- {476, 12849, 12849},
- {7667, 12854, 12854},
- {12760, 12860, 12871},
- {11677, 12875, 12877},
- {12714, 12881, 12881},
- {12731, 12885, 12890},
- {7108, 12894, 12896},
- {1165, 12900, 12900},
- {4021, 12906, 12906},
- {10829, 12910, 12911},
- {12331, 12915, 12915},
- {8887, 12919, 12921},
- {11639, 12925, 12925},
- {7964, 12929, 12929},
- {12528, 12937, 12937},
- {8148, 12941, 12941},
- {12770, 12948, 12950},
- {12609, 12954, 12954},
- {12685, 12958, 12958},
- {2803, 12962, 12962},
- {9561, 12966, 12966},
- {6671, 12972, 12973},
- {12056, 12977, 12977},
- {6380, 12981, 12981},
- {12048, 12985, 12985},
- {11961, 12989, 12993},
- {3368, 12997, 12999},
- {6634, 13004, 13004},
- {6775, 13009, 13010},
- {12136, 13014, 13019},
- {10341, 13023, 13023},
- {13002, 13027, 13027},
- {10587, 13031, 13031},
- {10307, 13035, 13035},
- {12736, 13039, 13039},
- {12744, 13043, 13044},
- {6175, 13048, 13048},
- {9702, 13053, 13054},
- {662, 13059, 13061},
- {12718, 13065, 13068},
- {12893, 13072, 13075},
- {8299, 13086, 13091},
- {12604, 13095, 13096},
- {12848, 13100, 13101},
- {12749, 13105, 13105},
- {12526, 13109, 13114},
- {9173, 13122, 13122},
- {12769, 13128, 13128},
- {13038, 13132, 13132},
- {12725, 13136, 13137},
- {12639, 13146, 13146},
- {9711, 13150, 13151},
- {12137, 13155, 13155},
- {13039, 13159, 13159},
- {4681, 13163, 13164},
- {12954, 13168, 13168},
- {13158, 13175, 13176},
- {13105, 13180, 13180},
- {10754, 13184, 13184},
- {13167, 13188, 13188},
- {12658, 13192, 13192},
- {4294, 13199, 13200},
- {11682, 13204, 13205},
- {11695, 13209, 13209},
- {11076, 13214, 13214},
- {12232, 13218, 13218},
- {9399, 13223, 13224},
- {12880, 13228, 13229},
- {13048, 13234, 13234},
- {9701, 13238, 13239},
- {13209, 13243, 13243},
- {3658, 13248, 13248},
- {3698, 13252, 13254},
- {12237, 13260, 13260},
- {8872, 13266, 13266},
- {12957, 13272, 13273},
- {1393, 13281, 13281},
- {2013, 13285, 13288},
- {4244, 13296, 13299},
- {9428, 13303, 13303},
- {12702, 13307, 13307},
- {13078, 13311, 13311},
- {6071, 13315, 13315},
- {3061, 13319, 13319},
- {2051, 13324, 13324},
- {11560, 13328, 13331},
- {6584, 13336, 13336},
- {8482, 13340, 13340},
- {5331, 13344, 13344},
- {4171, 13348, 13348},
- {8501, 13352, 13352},
- {9219, 13356, 13356},
- {9473, 13360, 13363},
- {12881, 13367, 13367},
- {13065, 13371, 13375},
- {2979, 13379, 13384},
- {1518, 13388, 13388},
- {11177, 13392, 13392},
- {9457, 13398, 13398},
- {12293, 13407, 13410},
- {3697, 13414, 13417},
- {10338, 13425, 13425},
- {13367, 13429, 13429},
- {11074, 13433, 13437},
- {4201, 13441, 13443},
- {1812, 13447, 13448},
- {13360, 13452, 13456},
- {13188, 13463, 13463},
- {9732, 13470, 13470},
- {11332, 13477, 13477},
- {9918, 13487, 13487},
- {6337, 13497, 13497},
- {13429, 13501, 13501},
- {11413, 13505, 13505},
- {4685, 13512, 13513},
- {13136, 13517, 13519},
- {7416, 13528, 13530},
- {12929, 13534, 13534},
- {11110, 13539, 13539},
- {11521, 13543, 13543},
- {12825, 13553, 13553},
- {13447, 13557, 13558},
- {12299, 13562, 13563},
- {9003, 13570, 13570},
- {12500, 13577, 13577},
- {13501, 13581, 13581},
- {9392, 13586, 13586},
- {12454, 13590, 13590},
- {6189, 13595, 13595},
- {13053, 13599, 13599},
- {11881, 13604, 13604},
- {13159, 13608, 13608},
- {4894, 13612, 13612},
- {13221, 13621, 13621},
- {8950, 13625, 13625},
- {13533, 13629, 13629},
- {9633, 13633, 13633},
- {7892, 13637, 13639},
- {13581, 13643, 13643},
- {13616, 13647, 13649},
- {12794, 13653, 13654},
- {8919, 13659, 13659},
- {9674, 13663, 13663},
- {13577, 13668, 13668},
- {12966, 13672, 13672},
- {12659, 13676, 13683},
- {6124, 13688, 13688},
- {9225, 13693, 13695},
- {11833, 13702, 13702},
- {12904, 13709, 13717},
- {13647, 13721, 13722},
- {11687, 13726, 13727},
- {12434, 13731, 13732},
- {12689, 13736, 13742},
- {13168, 13746, 13746},
- {6151, 13751, 13752},
- {11821, 13756, 13757},
- {6467, 13764, 13764},
- {5730, 13769, 13769},
- {5136, 13780, 13780},
- {724, 13784, 13785},
- {13517, 13789, 13791},
- {640, 13795, 13796},
- {7721, 13800, 13802},
- {11121, 13806, 13807},
- {5791, 13811, 13815},
- {12894, 13819, 13819},
- {11100, 13824, 13824},
- {7011, 13830, 13830},
- {7129, 13834, 13837},
- {13833, 13841, 13841},
- {11276, 13847, 13847},
- {13621, 13853, 13853},
- {13589, 13862, 13863},
- {12989, 13867, 13867},
- {12789, 13871, 13871},
- {1239, 13875, 13875},
- {4675, 13879, 13881},
- {4686, 13885, 13885},
- {707, 13889, 13889},
- {5449, 13897, 13898},
- {13867, 13902, 13903},
- {10613, 13908, 13908},
- {13789, 13912, 13914},
- {4451, 13918, 13919},
- {9200, 13924, 13924},
- {2011, 13930, 13930},
- {11433, 13934, 13936},
- {4695, 13942, 13943},
- {9435, 13948, 13951},
- {13688, 13955, 13957},
- {11694, 13961, 13962},
- {5712, 13966, 13966},
- {5991, 13970, 13972},
- {13477, 13976, 13976},
- {10213, 13987, 13987},
- {11839, 13991, 13993},
- {12272, 13997, 13997},
- {6206, 14001, 14001},
- {13179, 14006, 14007},
- {2939, 14011, 14011},
- {12972, 14016, 14017},
- {13918, 14021, 14022},
- {7436, 14026, 14027},
- {7678, 14032, 14034},
- {13586, 14040, 14040},
- {13347, 14044, 14044},
- {13109, 14048, 14051},
- {9244, 14055, 14057},
- {13315, 14061, 14061},
- {13276, 14067, 14067},
- {11435, 14073, 14074},
- {13853, 14078, 14078},
- {13452, 14082, 14082},
- {14044, 14087, 14087},
- {4440, 14091, 14095},
- {4479, 14100, 14103},
- {9395, 14107, 14109},
- {6834, 14119, 14119},
- {10458, 14123, 14124},
- {1429, 14129, 14129},
- {8443, 14135, 14135},
- {10365, 14140, 14140},
- {5267, 14145, 14145},
- {11834, 14151, 14153},
-}
diff --git a/vendor/src/github.com/golang/snappy/snappy.go b/vendor/src/github.com/golang/snappy/snappy.go
deleted file mode 100644
index 0cf5e37..0000000
--- a/vendor/src/github.com/golang/snappy/snappy.go
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright 2011 The Snappy-Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package snappy implements the snappy block-based compression format.
-// It aims for very high speeds and reasonable compression.
-//
-// The C++ snappy implementation is at https://github.com/google/snappy
-package snappy // import "github.com/golang/snappy"
-
-import (
- "hash/crc32"
-)
-
-/*
-Each encoded block begins with the varint-encoded length of the decoded data,
-followed by a sequence of chunks. Chunks begin and end on byte boundaries. The
-first byte of each chunk is broken into its 2 least and 6 most significant bits
-called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag.
-Zero means a literal tag. All other values mean a copy tag.
-
-For literal tags:
- - If m < 60, the next 1 + m bytes are literal bytes.
- - Otherwise, let n be the little-endian unsigned integer denoted by the next
- m - 59 bytes. The next 1 + n bytes after that are literal bytes.
-
-For copy tags, length bytes are copied from offset bytes ago, in the style of
-Lempel-Ziv compression algorithms. In particular:
- - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12).
- The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10
- of the offset. The next byte is bits 0-7 of the offset.
- - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65).
- The length is 1 + m. The offset is the little-endian unsigned integer
- denoted by the next 2 bytes.
- - For l == 3, this tag is a legacy format that is no longer issued by most
- encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in
- [1, 65). The length is 1 + m. The offset is the little-endian unsigned
- integer denoted by the next 4 bytes.
-*/
-const (
- tagLiteral = 0x00
- tagCopy1 = 0x01
- tagCopy2 = 0x02
- tagCopy4 = 0x03
-)
-
-const (
- checksumSize = 4
- chunkHeaderSize = 4
- magicChunk = "\xff\x06\x00\x00" + magicBody
- magicBody = "sNaPpY"
-
- // maxBlockSize is the maximum size of the input to encodeBlock. It is not
- // part of the wire format per se, but some parts of the encoder assume
- // that an offset fits into a uint16.
- //
- // Also, for the framing format (Writer type instead of Encode function),
- // https://github.com/google/snappy/blob/master/framing_format.txt says
- // that "the uncompressed data in a chunk must be no longer than 65536
- // bytes".
- maxBlockSize = 65536
-
- // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is
- // hard coded to be a const instead of a variable, so that obufLen can also
- // be a const. Their equivalence is confirmed by
- // TestMaxEncodedLenOfMaxBlockSize.
- maxEncodedLenOfMaxBlockSize = 76490
-
- obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize
- obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize
-)
-
-const (
- chunkTypeCompressedData = 0x00
- chunkTypeUncompressedData = 0x01
- chunkTypePadding = 0xfe
- chunkTypeStreamIdentifier = 0xff
-)
-
-var crcTable = crc32.MakeTable(crc32.Castagnoli)
-
-// crc implements the checksum specified in section 3 of
-// https://github.com/google/snappy/blob/master/framing_format.txt
-func crc(b []byte) uint32 {
- c := crc32.Update(0, crcTable, b)
- return uint32(c>>15|c<<17) + 0xa282ead8
-}
diff --git a/vendor/src/github.com/golang/snappy/snappy_test.go b/vendor/src/github.com/golang/snappy/snappy_test.go
deleted file mode 100644
index 2712710..0000000
--- a/vendor/src/github.com/golang/snappy/snappy_test.go
+++ /dev/null
@@ -1,1353 +0,0 @@
-// Copyright 2011 The Snappy-Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package snappy
-
-import (
- "bytes"
- "encoding/binary"
- "flag"
- "fmt"
- "io"
- "io/ioutil"
- "math/rand"
- "net/http"
- "os"
- "os/exec"
- "path/filepath"
- "runtime"
- "strings"
- "testing"
-)
-
-var (
- download = flag.Bool("download", false, "If true, download any missing files before running benchmarks")
- testdataDir = flag.String("testdataDir", "testdata", "Directory containing the test data")
- benchdataDir = flag.String("benchdataDir", "testdata/bench", "Directory containing the benchmark data")
-)
-
-// goEncoderShouldMatchCppEncoder is whether to test that the algorithm used by
-// Go's encoder matches byte-for-byte what the C++ snappy encoder produces, on
-// this GOARCH. There is more than one valid encoding of any given input, and
-// there is more than one good algorithm along the frontier of trading off
-// throughput for output size. Nonetheless, we presume that the C++ encoder's
-// algorithm is a good one and has been tested on a wide range of inputs, so
-// matching that exactly should mean that the Go encoder's algorithm is also
-// good, without needing to gather our own corpus of test data.
-//
-// The exact algorithm used by the C++ code is potentially endian dependent, as
-// it puns a byte pointer to a uint32 pointer to load, hash and compare 4 bytes
-// at a time. The Go implementation is endian agnostic, in that its output is
-// the same (as little-endian C++ code), regardless of the CPU's endianness.
-//
-// Thus, when comparing Go's output to C++ output generated beforehand, such as
-// the "testdata/pi.txt.rawsnappy" file generated by C++ code on a little-
-// endian system, we can run that test regardless of the runtime.GOARCH value.
-//
-// When comparing Go's output to dynamically generated C++ output, i.e. the
-// result of fork/exec'ing a C++ program, we can run that test only on
-// little-endian systems, because the C++ output might be different on
-// big-endian systems. The runtime package doesn't export endianness per se,
-// but we can restrict this match-C++ test to common little-endian systems.
-const goEncoderShouldMatchCppEncoder = runtime.GOARCH == "386" || runtime.GOARCH == "amd64" || runtime.GOARCH == "arm"
-
-func TestMaxEncodedLenOfMaxBlockSize(t *testing.T) {
- got := maxEncodedLenOfMaxBlockSize
- want := MaxEncodedLen(maxBlockSize)
- if got != want {
- t.Fatalf("got %d, want %d", got, want)
- }
-}
-
-func cmp(a, b []byte) error {
- if bytes.Equal(a, b) {
- return nil
- }
- if len(a) != len(b) {
- return fmt.Errorf("got %d bytes, want %d", len(a), len(b))
- }
- for i := range a {
- if a[i] != b[i] {
- return fmt.Errorf("byte #%d: got 0x%02x, want 0x%02x", i, a[i], b[i])
- }
- }
- return nil
-}
-
-func roundtrip(b, ebuf, dbuf []byte) error {
- d, err := Decode(dbuf, Encode(ebuf, b))
- if err != nil {
- return fmt.Errorf("decoding error: %v", err)
- }
- if err := cmp(d, b); err != nil {
- return fmt.Errorf("roundtrip mismatch: %v", err)
- }
- return nil
-}
-
-func TestEmpty(t *testing.T) {
- if err := roundtrip(nil, nil, nil); err != nil {
- t.Fatal(err)
- }
-}
-
-func TestSmallCopy(t *testing.T) {
- for _, ebuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} {
- for _, dbuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} {
- for i := 0; i < 32; i++ {
- s := "aaaa" + strings.Repeat("b", i) + "aaaabbbb"
- if err := roundtrip([]byte(s), ebuf, dbuf); err != nil {
- t.Errorf("len(ebuf)=%d, len(dbuf)=%d, i=%d: %v", len(ebuf), len(dbuf), i, err)
- }
- }
- }
- }
-}
-
-func TestSmallRand(t *testing.T) {
- rng := rand.New(rand.NewSource(1))
- for n := 1; n < 20000; n += 23 {
- b := make([]byte, n)
- for i := range b {
- b[i] = uint8(rng.Intn(256))
- }
- if err := roundtrip(b, nil, nil); err != nil {
- t.Fatal(err)
- }
- }
-}
-
-func TestSmallRegular(t *testing.T) {
- for n := 1; n < 20000; n += 23 {
- b := make([]byte, n)
- for i := range b {
- b[i] = uint8(i%10 + 'a')
- }
- if err := roundtrip(b, nil, nil); err != nil {
- t.Fatal(err)
- }
- }
-}
-
-func TestInvalidVarint(t *testing.T) {
- testCases := []struct {
- desc string
- input string
- }{{
- "invalid varint, final byte has continuation bit set",
- "\xff",
- }, {
- "invalid varint, value overflows uint64",
- "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00",
- }, {
- // https://github.com/google/snappy/blob/master/format_description.txt
- // says that "the stream starts with the uncompressed length [as a
- // varint] (up to a maximum of 2^32 - 1)".
- "valid varint (as uint64), but value overflows uint32",
- "\x80\x80\x80\x80\x10",
- }}
-
- for _, tc := range testCases {
- input := []byte(tc.input)
- if _, err := DecodedLen(input); err != ErrCorrupt {
- t.Errorf("%s: DecodedLen: got %v, want ErrCorrupt", tc.desc, err)
- }
- if _, err := Decode(nil, input); err != ErrCorrupt {
- t.Errorf("%s: Decode: got %v, want ErrCorrupt", tc.desc, err)
- }
- }
-}
-
-func TestDecode(t *testing.T) {
- lit40Bytes := make([]byte, 40)
- for i := range lit40Bytes {
- lit40Bytes[i] = byte(i)
- }
- lit40 := string(lit40Bytes)
-
- testCases := []struct {
- desc string
- input string
- want string
- wantErr error
- }{{
- `decodedLen=0; valid input`,
- "\x00",
- "",
- nil,
- }, {
- `decodedLen=3; tagLiteral, 0-byte length; length=3; valid input`,
- "\x03" + "\x08\xff\xff\xff",
- "\xff\xff\xff",
- nil,
- }, {
- `decodedLen=2; tagLiteral, 0-byte length; length=3; not enough dst bytes`,
- "\x02" + "\x08\xff\xff\xff",
- "",
- ErrCorrupt,
- }, {
- `decodedLen=3; tagLiteral, 0-byte length; length=3; not enough src bytes`,
- "\x03" + "\x08\xff\xff",
- "",
- ErrCorrupt,
- }, {
- `decodedLen=40; tagLiteral, 0-byte length; length=40; valid input`,
- "\x28" + "\x9c" + lit40,
- lit40,
- nil,
- }, {
- `decodedLen=1; tagLiteral, 1-byte length; not enough length bytes`,
- "\x01" + "\xf0",
- "",
- ErrCorrupt,
- }, {
- `decodedLen=3; tagLiteral, 1-byte length; length=3; valid input`,
- "\x03" + "\xf0\x02\xff\xff\xff",
- "\xff\xff\xff",
- nil,
- }, {
- `decodedLen=1; tagLiteral, 2-byte length; not enough length bytes`,
- "\x01" + "\xf4\x00",
- "",
- ErrCorrupt,
- }, {
- `decodedLen=3; tagLiteral, 2-byte length; length=3; valid input`,
- "\x03" + "\xf4\x02\x00\xff\xff\xff",
- "\xff\xff\xff",
- nil,
- }, {
- `decodedLen=1; tagLiteral, 3-byte length; not enough length bytes`,
- "\x01" + "\xf8\x00\x00",
- "",
- ErrCorrupt,
- }, {
- `decodedLen=3; tagLiteral, 3-byte length; length=3; valid input`,
- "\x03" + "\xf8\x02\x00\x00\xff\xff\xff",
- "\xff\xff\xff",
- nil,
- }, {
- `decodedLen=1; tagLiteral, 4-byte length; not enough length bytes`,
- "\x01" + "\xfc\x00\x00\x00",
- "",
- ErrCorrupt,
- }, {
- `decodedLen=1; tagLiteral, 4-byte length; length=3; not enough dst bytes`,
- "\x01" + "\xfc\x02\x00\x00\x00\xff\xff\xff",
- "",
- ErrCorrupt,
- }, {
- `decodedLen=4; tagLiteral, 4-byte length; length=3; not enough src bytes`,
- "\x04" + "\xfc\x02\x00\x00\x00\xff",
- "",
- ErrCorrupt,
- }, {
- `decodedLen=3; tagLiteral, 4-byte length; length=3; valid input`,
- "\x03" + "\xfc\x02\x00\x00\x00\xff\xff\xff",
- "\xff\xff\xff",
- nil,
- }, {
- `decodedLen=4; tagCopy1, 1 extra length|offset byte; not enough extra bytes`,
- "\x04" + "\x01",
- "",
- ErrCorrupt,
- }, {
- `decodedLen=4; tagCopy2, 2 extra length|offset bytes; not enough extra bytes`,
- "\x04" + "\x02\x00",
- "",
- ErrCorrupt,
- }, {
- `decodedLen=4; tagCopy4, 4 extra length|offset bytes; not enough extra bytes`,
- "\x04" + "\x03\x00\x00\x00",
- "",
- ErrCorrupt,
- }, {
- `decodedLen=4; tagLiteral (4 bytes "abcd"); valid input`,
- "\x04" + "\x0cabcd",
- "abcd",
- nil,
- }, {
- `decodedLen=13; tagLiteral (4 bytes "abcd"); tagCopy1; length=9 offset=4; valid input`,
- "\x0d" + "\x0cabcd" + "\x15\x04",
- "abcdabcdabcda",
- nil,
- }, {
- `decodedLen=8; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=4; valid input`,
- "\x08" + "\x0cabcd" + "\x01\x04",
- "abcdabcd",
- nil,
- }, {
- `decodedLen=8; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=2; valid input`,
- "\x08" + "\x0cabcd" + "\x01\x02",
- "abcdcdcd",
- nil,
- }, {
- `decodedLen=8; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=1; valid input`,
- "\x08" + "\x0cabcd" + "\x01\x01",
- "abcddddd",
- nil,
- }, {
- `decodedLen=8; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=0; zero offset`,
- "\x08" + "\x0cabcd" + "\x01\x00",
- "",
- ErrCorrupt,
- }, {
- `decodedLen=9; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=4; inconsistent dLen`,
- "\x09" + "\x0cabcd" + "\x01\x04",
- "",
- ErrCorrupt,
- }, {
- `decodedLen=8; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=5; offset too large`,
- "\x08" + "\x0cabcd" + "\x01\x05",
- "",
- ErrCorrupt,
- }, {
- `decodedLen=7; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=4; length too large`,
- "\x07" + "\x0cabcd" + "\x01\x04",
- "",
- ErrCorrupt,
- }, {
- `decodedLen=6; tagLiteral (4 bytes "abcd"); tagCopy2; length=2 offset=3; valid input`,
- "\x06" + "\x0cabcd" + "\x06\x03\x00",
- "abcdbc",
- nil,
- }, {
- `decodedLen=6; tagLiteral (4 bytes "abcd"); tagCopy4; length=2 offset=3; valid input`,
- "\x06" + "\x0cabcd" + "\x07\x03\x00\x00\x00",
- "abcdbc",
- nil,
- }}
-
- const (
- // notPresentXxx defines a range of byte values [0xa0, 0xc5) that are
- // not present in either the input or the output. It is written to dBuf
- // to check that Decode does not write bytes past the end of
- // dBuf[:dLen].
- //
- // The magic number 37 was chosen because it is prime. A more 'natural'
- // number like 32 might lead to a false negative if, for example, a
- // byte was incorrectly copied 4*8 bytes later.
- notPresentBase = 0xa0
- notPresentLen = 37
- )
-
- var dBuf [100]byte
-loop:
- for i, tc := range testCases {
- input := []byte(tc.input)
- for _, x := range input {
- if notPresentBase <= x && x < notPresentBase+notPresentLen {
- t.Errorf("#%d (%s): input shouldn't contain %#02x\ninput: % x", i, tc.desc, x, input)
- continue loop
- }
- }
-
- dLen, n := binary.Uvarint(input)
- if n <= 0 {
- t.Errorf("#%d (%s): invalid varint-encoded dLen", i, tc.desc)
- continue
- }
- if dLen > uint64(len(dBuf)) {
- t.Errorf("#%d (%s): dLen %d is too large", i, tc.desc, dLen)
- continue
- }
-
- for j := range dBuf {
- dBuf[j] = byte(notPresentBase + j%notPresentLen)
- }
- g, gotErr := Decode(dBuf[:], input)
- if got := string(g); got != tc.want || gotErr != tc.wantErr {
- t.Errorf("#%d (%s):\ngot %q, %v\nwant %q, %v",
- i, tc.desc, got, gotErr, tc.want, tc.wantErr)
- continue
- }
- for j, x := range dBuf {
- if uint64(j) < dLen {
- continue
- }
- if w := byte(notPresentBase + j%notPresentLen); x != w {
- t.Errorf("#%d (%s): Decode overrun: dBuf[%d] was modified: got %#02x, want %#02x\ndBuf: % x",
- i, tc.desc, j, x, w, dBuf)
- continue loop
- }
- }
- }
-}
-
-func TestDecodeCopy4(t *testing.T) {
- dots := strings.Repeat(".", 65536)
-
- input := strings.Join([]string{
- "\x89\x80\x04", // decodedLen = 65545.
- "\x0cpqrs", // 4-byte literal "pqrs".
- "\xf4\xff\xff" + dots, // 65536-byte literal dots.
- "\x13\x04\x00\x01\x00", // tagCopy4; length=5 offset=65540.
- }, "")
-
- gotBytes, err := Decode(nil, []byte(input))
- if err != nil {
- t.Fatal(err)
- }
- got := string(gotBytes)
- want := "pqrs" + dots + "pqrs."
- if len(got) != len(want) {
- t.Fatalf("got %d bytes, want %d", len(got), len(want))
- }
- if got != want {
- for i := 0; i < len(got); i++ {
- if g, w := got[i], want[i]; g != w {
- t.Fatalf("byte #%d: got %#02x, want %#02x", i, g, w)
- }
- }
- }
-}
-
-// TestDecodeLengthOffset tests decoding an encoding of the form literal +
-// copy-length-offset + literal. For example: "abcdefghijkl" + "efghij" + "AB".
-func TestDecodeLengthOffset(t *testing.T) {
- const (
- prefix = "abcdefghijklmnopqr"
- suffix = "ABCDEFGHIJKLMNOPQR"
-
- // notPresentXxx defines a range of byte values [0xa0, 0xc5) that are
- // not present in either the input or the output. It is written to
- // gotBuf to check that Decode does not write bytes past the end of
- // gotBuf[:totalLen].
- //
- // The magic number 37 was chosen because it is prime. A more 'natural'
- // number like 32 might lead to a false negative if, for example, a
- // byte was incorrectly copied 4*8 bytes later.
- notPresentBase = 0xa0
- notPresentLen = 37
- )
- var gotBuf, wantBuf, inputBuf [128]byte
- for length := 1; length <= 18; length++ {
- for offset := 1; offset <= 18; offset++ {
- loop:
- for suffixLen := 0; suffixLen <= 18; suffixLen++ {
- totalLen := len(prefix) + length + suffixLen
-
- inputLen := binary.PutUvarint(inputBuf[:], uint64(totalLen))
- inputBuf[inputLen] = tagLiteral + 4*byte(len(prefix)-1)
- inputLen++
- inputLen += copy(inputBuf[inputLen:], prefix)
- inputBuf[inputLen+0] = tagCopy2 + 4*byte(length-1)
- inputBuf[inputLen+1] = byte(offset)
- inputBuf[inputLen+2] = 0x00
- inputLen += 3
- if suffixLen > 0 {
- inputBuf[inputLen] = tagLiteral + 4*byte(suffixLen-1)
- inputLen++
- inputLen += copy(inputBuf[inputLen:], suffix[:suffixLen])
- }
- input := inputBuf[:inputLen]
-
- for i := range gotBuf {
- gotBuf[i] = byte(notPresentBase + i%notPresentLen)
- }
- got, err := Decode(gotBuf[:], input)
- if err != nil {
- t.Errorf("length=%d, offset=%d; suffixLen=%d: %v", length, offset, suffixLen, err)
- continue
- }
-
- wantLen := 0
- wantLen += copy(wantBuf[wantLen:], prefix)
- for i := 0; i < length; i++ {
- wantBuf[wantLen] = wantBuf[wantLen-offset]
- wantLen++
- }
- wantLen += copy(wantBuf[wantLen:], suffix[:suffixLen])
- want := wantBuf[:wantLen]
-
- for _, x := range input {
- if notPresentBase <= x && x < notPresentBase+notPresentLen {
- t.Errorf("length=%d, offset=%d; suffixLen=%d: input shouldn't contain %#02x\ninput: % x",
- length, offset, suffixLen, x, input)
- continue loop
- }
- }
- for i, x := range gotBuf {
- if i < totalLen {
- continue
- }
- if w := byte(notPresentBase + i%notPresentLen); x != w {
- t.Errorf("length=%d, offset=%d; suffixLen=%d; totalLen=%d: "+
- "Decode overrun: gotBuf[%d] was modified: got %#02x, want %#02x\ngotBuf: % x",
- length, offset, suffixLen, totalLen, i, x, w, gotBuf)
- continue loop
- }
- }
- for _, x := range want {
- if notPresentBase <= x && x < notPresentBase+notPresentLen {
- t.Errorf("length=%d, offset=%d; suffixLen=%d: want shouldn't contain %#02x\nwant: % x",
- length, offset, suffixLen, x, want)
- continue loop
- }
- }
-
- if !bytes.Equal(got, want) {
- t.Errorf("length=%d, offset=%d; suffixLen=%d:\ninput % x\ngot % x\nwant % x",
- length, offset, suffixLen, input, got, want)
- continue
- }
- }
- }
- }
-}
-
-const (
- goldenText = "Mark.Twain-Tom.Sawyer.txt"
- goldenCompressed = goldenText + ".rawsnappy"
-)
-
-func TestDecodeGoldenInput(t *testing.T) {
- tDir := filepath.FromSlash(*testdataDir)
- src, err := ioutil.ReadFile(filepath.Join(tDir, goldenCompressed))
- if err != nil {
- t.Fatalf("ReadFile: %v", err)
- }
- got, err := Decode(nil, src)
- if err != nil {
- t.Fatalf("Decode: %v", err)
- }
- want, err := ioutil.ReadFile(filepath.Join(tDir, goldenText))
- if err != nil {
- t.Fatalf("ReadFile: %v", err)
- }
- if err := cmp(got, want); err != nil {
- t.Fatal(err)
- }
-}
-
-func TestEncodeGoldenInput(t *testing.T) {
- tDir := filepath.FromSlash(*testdataDir)
- src, err := ioutil.ReadFile(filepath.Join(tDir, goldenText))
- if err != nil {
- t.Fatalf("ReadFile: %v", err)
- }
- got := Encode(nil, src)
- want, err := ioutil.ReadFile(filepath.Join(tDir, goldenCompressed))
- if err != nil {
- t.Fatalf("ReadFile: %v", err)
- }
- if err := cmp(got, want); err != nil {
- t.Fatal(err)
- }
-}
-
-func TestExtendMatchGoldenInput(t *testing.T) {
- tDir := filepath.FromSlash(*testdataDir)
- src, err := ioutil.ReadFile(filepath.Join(tDir, goldenText))
- if err != nil {
- t.Fatalf("ReadFile: %v", err)
- }
- for i, tc := range extendMatchGoldenTestCases {
- got := extendMatch(src, tc.i, tc.j)
- if got != tc.want {
- t.Errorf("test #%d: i, j = %5d, %5d: got %5d (= j + %6d), want %5d (= j + %6d)",
- i, tc.i, tc.j, got, got-tc.j, tc.want, tc.want-tc.j)
- }
- }
-}
-
-func TestExtendMatch(t *testing.T) {
- // ref is a simple, reference implementation of extendMatch.
- ref := func(src []byte, i, j int) int {
- for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
- }
- return j
- }
-
- nums := []int{0, 1, 2, 7, 8, 9, 29, 30, 31, 32, 33, 34, 38, 39, 40}
- for yIndex := 40; yIndex > 30; yIndex-- {
- xxx := bytes.Repeat([]byte("x"), 40)
- if yIndex < len(xxx) {
- xxx[yIndex] = 'y'
- }
- for _, i := range nums {
- for _, j := range nums {
- if i >= j {
- continue
- }
- got := extendMatch(xxx, i, j)
- want := ref(xxx, i, j)
- if got != want {
- t.Errorf("yIndex=%d, i=%d, j=%d: got %d, want %d", yIndex, i, j, got, want)
- }
- }
- }
- }
-}
-
-const snappytoolCmdName = "cmd/snappytool/snappytool"
-
-func skipTestSameEncodingAsCpp() (msg string) {
- if !goEncoderShouldMatchCppEncoder {
- return fmt.Sprintf("skipping testing that the encoding is byte-for-byte identical to C++: GOARCH=%s", runtime.GOARCH)
- }
- if _, err := os.Stat(snappytoolCmdName); err != nil {
- return fmt.Sprintf("could not find snappytool: %v", err)
- }
- return ""
-}
-
-func runTestSameEncodingAsCpp(src []byte) error {
- got := Encode(nil, src)
-
- cmd := exec.Command(snappytoolCmdName, "-e")
- cmd.Stdin = bytes.NewReader(src)
- want, err := cmd.Output()
- if err != nil {
- return fmt.Errorf("could not run snappytool: %v", err)
- }
- return cmp(got, want)
-}
-
-func TestSameEncodingAsCppShortCopies(t *testing.T) {
- if msg := skipTestSameEncodingAsCpp(); msg != "" {
- t.Skip(msg)
- }
- src := bytes.Repeat([]byte{'a'}, 20)
- for i := 0; i <= len(src); i++ {
- if err := runTestSameEncodingAsCpp(src[:i]); err != nil {
- t.Errorf("i=%d: %v", i, err)
- }
- }
-}
-
-func TestSameEncodingAsCppLongFiles(t *testing.T) {
- if msg := skipTestSameEncodingAsCpp(); msg != "" {
- t.Skip(msg)
- }
- bDir := filepath.FromSlash(*benchdataDir)
- failed := false
- for i, tf := range testFiles {
- if err := downloadBenchmarkFiles(t, tf.filename); err != nil {
- t.Fatalf("failed to download testdata: %s", err)
- }
- data := readFile(t, filepath.Join(bDir, tf.filename))
- if n := tf.sizeLimit; 0 < n && n < len(data) {
- data = data[:n]
- }
- if err := runTestSameEncodingAsCpp(data); err != nil {
- t.Errorf("i=%d: %v", i, err)
- failed = true
- }
- }
- if failed {
- t.Errorf("was the snappytool program built against the C++ snappy library version " +
- "d53de187 or later, commited on 2016-04-05? See " +
- "https://github.com/google/snappy/commit/d53de18799418e113e44444252a39b12a0e4e0cc")
- }
-}
-
-// TestSlowForwardCopyOverrun tests the "expand the pattern" algorithm
-// described in decode_amd64.s and its claim of a 10 byte overrun worst case.
-func TestSlowForwardCopyOverrun(t *testing.T) {
- const base = 100
-
- for length := 1; length < 18; length++ {
- for offset := 1; offset < 18; offset++ {
- highWaterMark := base
- d := base
- l := length
- o := offset
-
- // makeOffsetAtLeast8
- for o < 8 {
- if end := d + 8; highWaterMark < end {
- highWaterMark = end
- }
- l -= o
- d += o
- o += o
- }
-
- // fixUpSlowForwardCopy
- a := d
- d += l
-
- // finishSlowForwardCopy
- for l > 0 {
- if end := a + 8; highWaterMark < end {
- highWaterMark = end
- }
- a += 8
- l -= 8
- }
-
- dWant := base + length
- overrun := highWaterMark - dWant
- if d != dWant || overrun < 0 || 10 < overrun {
- t.Errorf("length=%d, offset=%d: d and overrun: got (%d, %d), want (%d, something in [0, 10])",
- length, offset, d, overrun, dWant)
- }
- }
- }
-}
-
-// TestEncodeNoiseThenRepeats encodes input for which the first half is very
-// incompressible and the second half is very compressible. The encoded form's
-// length should be closer to 50% of the original length than 100%.
-func TestEncodeNoiseThenRepeats(t *testing.T) {
- for _, origLen := range []int{256 * 1024, 2048 * 1024} {
- src := make([]byte, origLen)
- rng := rand.New(rand.NewSource(1))
- firstHalf, secondHalf := src[:origLen/2], src[origLen/2:]
- for i := range firstHalf {
- firstHalf[i] = uint8(rng.Intn(256))
- }
- for i := range secondHalf {
- secondHalf[i] = uint8(i >> 8)
- }
- dst := Encode(nil, src)
- if got, want := len(dst), origLen*3/4; got >= want {
- t.Errorf("origLen=%d: got %d encoded bytes, want less than %d", origLen, got, want)
- }
- }
-}
-
-func TestFramingFormat(t *testing.T) {
- // src is comprised of alternating 1e5-sized sequences of random
- // (incompressible) bytes and repeated (compressible) bytes. 1e5 was chosen
- // because it is larger than maxBlockSize (64k).
- src := make([]byte, 1e6)
- rng := rand.New(rand.NewSource(1))
- for i := 0; i < 10; i++ {
- if i%2 == 0 {
- for j := 0; j < 1e5; j++ {
- src[1e5*i+j] = uint8(rng.Intn(256))
- }
- } else {
- for j := 0; j < 1e5; j++ {
- src[1e5*i+j] = uint8(i)
- }
- }
- }
-
- buf := new(bytes.Buffer)
- if _, err := NewWriter(buf).Write(src); err != nil {
- t.Fatalf("Write: encoding: %v", err)
- }
- dst, err := ioutil.ReadAll(NewReader(buf))
- if err != nil {
- t.Fatalf("ReadAll: decoding: %v", err)
- }
- if err := cmp(dst, src); err != nil {
- t.Fatal(err)
- }
-}
-
-func TestWriterGoldenOutput(t *testing.T) {
- buf := new(bytes.Buffer)
- w := NewBufferedWriter(buf)
- defer w.Close()
- w.Write([]byte("abcd")) // Not compressible.
- w.Flush()
- w.Write(bytes.Repeat([]byte{'A'}, 150)) // Compressible.
- w.Flush()
- // The next chunk is also compressible, but a naive, greedy encoding of the
- // overall length 67 copy as a length 64 copy (the longest expressible as a
- // tagCopy1 or tagCopy2) plus a length 3 remainder would be two 3-byte
- // tagCopy2 tags (6 bytes), since the minimum length for a tagCopy1 is 4
- // bytes. Instead, we could do it shorter, in 5 bytes: a 3-byte tagCopy2
- // (of length 60) and a 2-byte tagCopy1 (of length 7).
- w.Write(bytes.Repeat([]byte{'B'}, 68))
- w.Write([]byte("efC")) // Not compressible.
- w.Write(bytes.Repeat([]byte{'C'}, 20)) // Compressible.
- w.Write(bytes.Repeat([]byte{'B'}, 20)) // Compressible.
- w.Write([]byte("g")) // Not compressible.
- w.Flush()
-
- got := buf.String()
- want := strings.Join([]string{
- magicChunk,
- "\x01\x08\x00\x00", // Uncompressed chunk, 8 bytes long (including 4 byte checksum).
- "\x68\x10\xe6\xb6", // Checksum.
- "\x61\x62\x63\x64", // Uncompressed payload: "abcd".
- "\x00\x11\x00\x00", // Compressed chunk, 17 bytes long (including 4 byte checksum).
- "\x5f\xeb\xf2\x10", // Checksum.
- "\x96\x01", // Compressed payload: Uncompressed length (varint encoded): 150.
- "\x00\x41", // Compressed payload: tagLiteral, length=1, "A".
- "\xfe\x01\x00", // Compressed payload: tagCopy2, length=64, offset=1.
- "\xfe\x01\x00", // Compressed payload: tagCopy2, length=64, offset=1.
- "\x52\x01\x00", // Compressed payload: tagCopy2, length=21, offset=1.
- "\x00\x18\x00\x00", // Compressed chunk, 24 bytes long (including 4 byte checksum).
- "\x30\x85\x69\xeb", // Checksum.
- "\x70", // Compressed payload: Uncompressed length (varint encoded): 112.
- "\x00\x42", // Compressed payload: tagLiteral, length=1, "B".
- "\xee\x01\x00", // Compressed payload: tagCopy2, length=60, offset=1.
- "\x0d\x01", // Compressed payload: tagCopy1, length=7, offset=1.
- "\x08\x65\x66\x43", // Compressed payload: tagLiteral, length=3, "efC".
- "\x4e\x01\x00", // Compressed payload: tagCopy2, length=20, offset=1.
- "\x4e\x5a\x00", // Compressed payload: tagCopy2, length=20, offset=90.
- "\x00\x67", // Compressed payload: tagLiteral, length=1, "g".
- }, "")
- if got != want {
- t.Fatalf("\ngot: % x\nwant: % x", got, want)
- }
-}
-
-func TestEmitLiteral(t *testing.T) {
- testCases := []struct {
- length int
- want string
- }{
- {1, "\x00"},
- {2, "\x04"},
- {59, "\xe8"},
- {60, "\xec"},
- {61, "\xf0\x3c"},
- {62, "\xf0\x3d"},
- {254, "\xf0\xfd"},
- {255, "\xf0\xfe"},
- {256, "\xf0\xff"},
- {257, "\xf4\x00\x01"},
- {65534, "\xf4\xfd\xff"},
- {65535, "\xf4\xfe\xff"},
- {65536, "\xf4\xff\xff"},
- }
-
- dst := make([]byte, 70000)
- nines := bytes.Repeat([]byte{0x99}, 65536)
- for _, tc := range testCases {
- lit := nines[:tc.length]
- n := emitLiteral(dst, lit)
- if !bytes.HasSuffix(dst[:n], lit) {
- t.Errorf("length=%d: did not end with that many literal bytes", tc.length)
- continue
- }
- got := string(dst[:n-tc.length])
- if got != tc.want {
- t.Errorf("length=%d:\ngot % x\nwant % x", tc.length, got, tc.want)
- continue
- }
- }
-}
-
-func TestEmitCopy(t *testing.T) {
- testCases := []struct {
- offset int
- length int
- want string
- }{
- {8, 04, "\x01\x08"},
- {8, 11, "\x1d\x08"},
- {8, 12, "\x2e\x08\x00"},
- {8, 13, "\x32\x08\x00"},
- {8, 59, "\xea\x08\x00"},
- {8, 60, "\xee\x08\x00"},
- {8, 61, "\xf2\x08\x00"},
- {8, 62, "\xf6\x08\x00"},
- {8, 63, "\xfa\x08\x00"},
- {8, 64, "\xfe\x08\x00"},
- {8, 65, "\xee\x08\x00\x05\x08"},
- {8, 66, "\xee\x08\x00\x09\x08"},
- {8, 67, "\xee\x08\x00\x0d\x08"},
- {8, 68, "\xfe\x08\x00\x01\x08"},
- {8, 69, "\xfe\x08\x00\x05\x08"},
- {8, 80, "\xfe\x08\x00\x3e\x08\x00"},
-
- {256, 04, "\x21\x00"},
- {256, 11, "\x3d\x00"},
- {256, 12, "\x2e\x00\x01"},
- {256, 13, "\x32\x00\x01"},
- {256, 59, "\xea\x00\x01"},
- {256, 60, "\xee\x00\x01"},
- {256, 61, "\xf2\x00\x01"},
- {256, 62, "\xf6\x00\x01"},
- {256, 63, "\xfa\x00\x01"},
- {256, 64, "\xfe\x00\x01"},
- {256, 65, "\xee\x00\x01\x25\x00"},
- {256, 66, "\xee\x00\x01\x29\x00"},
- {256, 67, "\xee\x00\x01\x2d\x00"},
- {256, 68, "\xfe\x00\x01\x21\x00"},
- {256, 69, "\xfe\x00\x01\x25\x00"},
- {256, 80, "\xfe\x00\x01\x3e\x00\x01"},
-
- {2048, 04, "\x0e\x00\x08"},
- {2048, 11, "\x2a\x00\x08"},
- {2048, 12, "\x2e\x00\x08"},
- {2048, 13, "\x32\x00\x08"},
- {2048, 59, "\xea\x00\x08"},
- {2048, 60, "\xee\x00\x08"},
- {2048, 61, "\xf2\x00\x08"},
- {2048, 62, "\xf6\x00\x08"},
- {2048, 63, "\xfa\x00\x08"},
- {2048, 64, "\xfe\x00\x08"},
- {2048, 65, "\xee\x00\x08\x12\x00\x08"},
- {2048, 66, "\xee\x00\x08\x16\x00\x08"},
- {2048, 67, "\xee\x00\x08\x1a\x00\x08"},
- {2048, 68, "\xfe\x00\x08\x0e\x00\x08"},
- {2048, 69, "\xfe\x00\x08\x12\x00\x08"},
- {2048, 80, "\xfe\x00\x08\x3e\x00\x08"},
- }
-
- dst := make([]byte, 1024)
- for _, tc := range testCases {
- n := emitCopy(dst, tc.offset, tc.length)
- got := string(dst[:n])
- if got != tc.want {
- t.Errorf("offset=%d, length=%d:\ngot % x\nwant % x", tc.offset, tc.length, got, tc.want)
- }
- }
-}
-
-func TestNewBufferedWriter(t *testing.T) {
- // Test all 32 possible sub-sequences of these 5 input slices.
- //
- // Their lengths sum to 400,000, which is over 6 times the Writer ibuf
- // capacity: 6 * maxBlockSize is 393,216.
- inputs := [][]byte{
- bytes.Repeat([]byte{'a'}, 40000),
- bytes.Repeat([]byte{'b'}, 150000),
- bytes.Repeat([]byte{'c'}, 60000),
- bytes.Repeat([]byte{'d'}, 120000),
- bytes.Repeat([]byte{'e'}, 30000),
- }
-loop:
- for i := 0; i < 1< 0; {
- i := copy(x, src)
- x = x[i:]
- }
- return dst
-}
-
-func benchWords(b *testing.B, n int, decode bool) {
- // Note: the file is OS-language dependent so the resulting values are not
- // directly comparable for non-US-English OS installations.
- data := expand(readFile(b, "/usr/share/dict/words"), n)
- if decode {
- benchDecode(b, data)
- } else {
- benchEncode(b, data)
- }
-}
-
-func BenchmarkWordsDecode1e1(b *testing.B) { benchWords(b, 1e1, true) }
-func BenchmarkWordsDecode1e2(b *testing.B) { benchWords(b, 1e2, true) }
-func BenchmarkWordsDecode1e3(b *testing.B) { benchWords(b, 1e3, true) }
-func BenchmarkWordsDecode1e4(b *testing.B) { benchWords(b, 1e4, true) }
-func BenchmarkWordsDecode1e5(b *testing.B) { benchWords(b, 1e5, true) }
-func BenchmarkWordsDecode1e6(b *testing.B) { benchWords(b, 1e6, true) }
-func BenchmarkWordsEncode1e1(b *testing.B) { benchWords(b, 1e1, false) }
-func BenchmarkWordsEncode1e2(b *testing.B) { benchWords(b, 1e2, false) }
-func BenchmarkWordsEncode1e3(b *testing.B) { benchWords(b, 1e3, false) }
-func BenchmarkWordsEncode1e4(b *testing.B) { benchWords(b, 1e4, false) }
-func BenchmarkWordsEncode1e5(b *testing.B) { benchWords(b, 1e5, false) }
-func BenchmarkWordsEncode1e6(b *testing.B) { benchWords(b, 1e6, false) }
-
-func BenchmarkRandomEncode(b *testing.B) {
- rng := rand.New(rand.NewSource(1))
- data := make([]byte, 1<<20)
- for i := range data {
- data[i] = uint8(rng.Intn(256))
- }
- benchEncode(b, data)
-}
-
-// testFiles' values are copied directly from
-// https://raw.githubusercontent.com/google/snappy/master/snappy_unittest.cc
-// The label field is unused in snappy-go.
-var testFiles = []struct {
- label string
- filename string
- sizeLimit int
-}{
- {"html", "html", 0},
- {"urls", "urls.10K", 0},
- {"jpg", "fireworks.jpeg", 0},
- {"jpg_200", "fireworks.jpeg", 200},
- {"pdf", "paper-100k.pdf", 0},
- {"html4", "html_x_4", 0},
- {"txt1", "alice29.txt", 0},
- {"txt2", "asyoulik.txt", 0},
- {"txt3", "lcet10.txt", 0},
- {"txt4", "plrabn12.txt", 0},
- {"pb", "geo.protodata", 0},
- {"gaviota", "kppkn.gtb", 0},
-}
-
-const (
- // The benchmark data files are at this canonical URL.
- benchURL = "https://raw.githubusercontent.com/google/snappy/master/testdata/"
-)
-
-func downloadBenchmarkFiles(b testing.TB, basename string) (errRet error) {
- bDir := filepath.FromSlash(*benchdataDir)
- filename := filepath.Join(bDir, basename)
- if stat, err := os.Stat(filename); err == nil && stat.Size() != 0 {
- return nil
- }
-
- if !*download {
- b.Skipf("test data not found; skipping %s without the -download flag", testOrBenchmark(b))
- }
- // Download the official snappy C++ implementation reference test data
- // files for benchmarking.
- if err := os.MkdirAll(bDir, 0777); err != nil && !os.IsExist(err) {
- return fmt.Errorf("failed to create %s: %s", bDir, err)
- }
-
- f, err := os.Create(filename)
- if err != nil {
- return fmt.Errorf("failed to create %s: %s", filename, err)
- }
- defer f.Close()
- defer func() {
- if errRet != nil {
- os.Remove(filename)
- }
- }()
- url := benchURL + basename
- resp, err := http.Get(url)
- if err != nil {
- return fmt.Errorf("failed to download %s: %s", url, err)
- }
- defer resp.Body.Close()
- if s := resp.StatusCode; s != http.StatusOK {
- return fmt.Errorf("downloading %s: HTTP status code %d (%s)", url, s, http.StatusText(s))
- }
- _, err = io.Copy(f, resp.Body)
- if err != nil {
- return fmt.Errorf("failed to download %s to %s: %s", url, filename, err)
- }
- return nil
-}
-
-func benchFile(b *testing.B, i int, decode bool) {
- if err := downloadBenchmarkFiles(b, testFiles[i].filename); err != nil {
- b.Fatalf("failed to download testdata: %s", err)
- }
- bDir := filepath.FromSlash(*benchdataDir)
- data := readFile(b, filepath.Join(bDir, testFiles[i].filename))
- if n := testFiles[i].sizeLimit; 0 < n && n < len(data) {
- data = data[:n]
- }
- if decode {
- benchDecode(b, data)
- } else {
- benchEncode(b, data)
- }
-}
-
-// Naming convention is kept similar to what snappy's C++ implementation uses.
-func Benchmark_UFlat0(b *testing.B) { benchFile(b, 0, true) }
-func Benchmark_UFlat1(b *testing.B) { benchFile(b, 1, true) }
-func Benchmark_UFlat2(b *testing.B) { benchFile(b, 2, true) }
-func Benchmark_UFlat3(b *testing.B) { benchFile(b, 3, true) }
-func Benchmark_UFlat4(b *testing.B) { benchFile(b, 4, true) }
-func Benchmark_UFlat5(b *testing.B) { benchFile(b, 5, true) }
-func Benchmark_UFlat6(b *testing.B) { benchFile(b, 6, true) }
-func Benchmark_UFlat7(b *testing.B) { benchFile(b, 7, true) }
-func Benchmark_UFlat8(b *testing.B) { benchFile(b, 8, true) }
-func Benchmark_UFlat9(b *testing.B) { benchFile(b, 9, true) }
-func Benchmark_UFlat10(b *testing.B) { benchFile(b, 10, true) }
-func Benchmark_UFlat11(b *testing.B) { benchFile(b, 11, true) }
-func Benchmark_ZFlat0(b *testing.B) { benchFile(b, 0, false) }
-func Benchmark_ZFlat1(b *testing.B) { benchFile(b, 1, false) }
-func Benchmark_ZFlat2(b *testing.B) { benchFile(b, 2, false) }
-func Benchmark_ZFlat3(b *testing.B) { benchFile(b, 3, false) }
-func Benchmark_ZFlat4(b *testing.B) { benchFile(b, 4, false) }
-func Benchmark_ZFlat5(b *testing.B) { benchFile(b, 5, false) }
-func Benchmark_ZFlat6(b *testing.B) { benchFile(b, 6, false) }
-func Benchmark_ZFlat7(b *testing.B) { benchFile(b, 7, false) }
-func Benchmark_ZFlat8(b *testing.B) { benchFile(b, 8, false) }
-func Benchmark_ZFlat9(b *testing.B) { benchFile(b, 9, false) }
-func Benchmark_ZFlat10(b *testing.B) { benchFile(b, 10, false) }
-func Benchmark_ZFlat11(b *testing.B) { benchFile(b, 11, false) }
-
-func BenchmarkExtendMatch(b *testing.B) {
- tDir := filepath.FromSlash(*testdataDir)
- src, err := ioutil.ReadFile(filepath.Join(tDir, goldenText))
- if err != nil {
- b.Fatalf("ReadFile: %v", err)
- }
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- for _, tc := range extendMatchGoldenTestCases {
- extendMatch(src, tc.i, tc.j)
- }
- }
-}
diff --git a/vendor/src/github.com/golang/snappy/testdata/Mark.Twain-Tom.Sawyer.txt b/vendor/src/github.com/golang/snappy/testdata/Mark.Twain-Tom.Sawyer.txt
deleted file mode 100644
index 86a1875..0000000
--- a/vendor/src/github.com/golang/snappy/testdata/Mark.Twain-Tom.Sawyer.txt
+++ /dev/null
@@ -1,396 +0,0 @@
-Produced by David Widger. The previous edition was updated by Jose
-Menendez.
-
-
-
-
-
- THE ADVENTURES OF TOM SAWYER
- BY
- MARK TWAIN
- (Samuel Langhorne Clemens)
-
-
-
-
- P R E F A C E
-
-MOST of the adventures recorded in this book really occurred; one or
-two were experiences of my own, the rest those of boys who were
-schoolmates of mine. Huck Finn is drawn from life; Tom Sawyer also, but
-not from an individual--he is a combination of the characteristics of
-three boys whom I knew, and therefore belongs to the composite order of
-architecture.
-
-The odd superstitions touched upon were all prevalent among children
-and slaves in the West at the period of this story--that is to say,
-thirty or forty years ago.
-
-Although my book is intended mainly for the entertainment of boys and
-girls, I hope it will not be shunned by men and women on that account,
-for part of my plan has been to try to pleasantly remind adults of what
-they once were themselves, and of how they felt and thought and talked,
-and what queer enterprises they sometimes engaged in.
-
- THE AUTHOR.
-
-HARTFORD, 1876.
-
-
-
- T O M S A W Y E R
-
-
-
-CHAPTER I
-
-"TOM!"
-
-No answer.
-
-"TOM!"
-
-No answer.
-
-"What's gone with that boy, I wonder? You TOM!"
-
-No answer.
-
-The old lady pulled her spectacles down and looked over them about the
-room; then she put them up and looked out under them. She seldom or
-never looked THROUGH them for so small a thing as a boy; they were her
-state pair, the pride of her heart, and were built for "style," not
-service--she could have seen through a pair of stove-lids just as well.
-She looked perplexed for a moment, and then said, not fiercely, but
-still loud enough for the furniture to hear:
-
-"Well, I lay if I get hold of you I'll--"
-
-She did not finish, for by this time she was bending down and punching
-under the bed with the broom, and so she needed breath to punctuate the
-punches with. She resurrected nothing but the cat.
-
-"I never did see the beat of that boy!"
-
-She went to the open door and stood in it and looked out among the
-tomato vines and "jimpson" weeds that constituted the garden. No Tom.
-So she lifted up her voice at an angle calculated for distance and
-shouted:
-
-"Y-o-u-u TOM!"
-
-There was a slight noise behind her and she turned just in time to
-seize a small boy by the slack of his roundabout and arrest his flight.
-
-"There! I might 'a' thought of that closet. What you been doing in
-there?"
-
-"Nothing."
-
-"Nothing! Look at your hands. And look at your mouth. What IS that
-truck?"
-
-"I don't know, aunt."
-
-"Well, I know. It's jam--that's what it is. Forty times I've said if
-you didn't let that jam alone I'd skin you. Hand me that switch."
-
-The switch hovered in the air--the peril was desperate--
-
-"My! Look behind you, aunt!"
-
-The old lady whirled round, and snatched her skirts out of danger. The
-lad fled on the instant, scrambled up the high board-fence, and
-disappeared over it.
-
-His aunt Polly stood surprised a moment, and then broke into a gentle
-laugh.
-
-"Hang the boy, can't I never learn anything? Ain't he played me tricks
-enough like that for me to be looking out for him by this time? But old
-fools is the biggest fools there is. Can't learn an old dog new tricks,
-as the saying is. But my goodness, he never plays them alike, two days,
-and how is a body to know what's coming? He 'pears to know just how
-long he can torment me before I get my dander up, and he knows if he
-can make out to put me off for a minute or make me laugh, it's all down
-again and I can't hit him a lick. I ain't doing my duty by that boy,
-and that's the Lord's truth, goodness knows. Spare the rod and spile
-the child, as the Good Book says. I'm a laying up sin and suffering for
-us both, I know. He's full of the Old Scratch, but laws-a-me! he's my
-own dead sister's boy, poor thing, and I ain't got the heart to lash
-him, somehow. Every time I let him off, my conscience does hurt me so,
-and every time I hit him my old heart most breaks. Well-a-well, man
-that is born of woman is of few days and full of trouble, as the
-Scripture says, and I reckon it's so. He'll play hookey this evening, *
-and [* Southwestern for "afternoon"] I'll just be obleeged to make him
-work, to-morrow, to punish him. It's mighty hard to make him work
-Saturdays, when all the boys is having holiday, but he hates work more
-than he hates anything else, and I've GOT to do some of my duty by him,
-or I'll be the ruination of the child."
-
-Tom did play hookey, and he had a very good time. He got back home
-barely in season to help Jim, the small colored boy, saw next-day's
-wood and split the kindlings before supper--at least he was there in
-time to tell his adventures to Jim while Jim did three-fourths of the
-work. Tom's younger brother (or rather half-brother) Sid was already
-through with his part of the work (picking up chips), for he was a
-quiet boy, and had no adventurous, troublesome ways.
-
-While Tom was eating his supper, and stealing sugar as opportunity
-offered, Aunt Polly asked him questions that were full of guile, and
-very deep--for she wanted to trap him into damaging revealments. Like
-many other simple-hearted souls, it was her pet vanity to believe she
-was endowed with a talent for dark and mysterious diplomacy, and she
-loved to contemplate her most transparent devices as marvels of low
-cunning. Said she:
-
-"Tom, it was middling warm in school, warn't it?"
-
-"Yes'm."
-
-"Powerful warm, warn't it?"
-
-"Yes'm."
-
-"Didn't you want to go in a-swimming, Tom?"
-
-A bit of a scare shot through Tom--a touch of uncomfortable suspicion.
-He searched Aunt Polly's face, but it told him nothing. So he said:
-
-"No'm--well, not very much."
-
-The old lady reached out her hand and felt Tom's shirt, and said:
-
-"But you ain't too warm now, though." And it flattered her to reflect
-that she had discovered that the shirt was dry without anybody knowing
-that that was what she had in her mind. But in spite of her, Tom knew
-where the wind lay, now. So he forestalled what might be the next move:
-
-"Some of us pumped on our heads--mine's damp yet. See?"
-
-Aunt Polly was vexed to think she had overlooked that bit of
-circumstantial evidence, and missed a trick. Then she had a new
-inspiration:
-
-"Tom, you didn't have to undo your shirt collar where I sewed it, to
-pump on your head, did you? Unbutton your jacket!"
-
-The trouble vanished out of Tom's face. He opened his jacket. His
-shirt collar was securely sewed.
-
-"Bother! Well, go 'long with you. I'd made sure you'd played hookey
-and been a-swimming. But I forgive ye, Tom. I reckon you're a kind of a
-singed cat, as the saying is--better'n you look. THIS time."
-
-She was half sorry her sagacity had miscarried, and half glad that Tom
-had stumbled into obedient conduct for once.
-
-But Sidney said:
-
-"Well, now, if I didn't think you sewed his collar with white thread,
-but it's black."
-
-"Why, I did sew it with white! Tom!"
-
-But Tom did not wait for the rest. As he went out at the door he said:
-
-"Siddy, I'll lick you for that."
-
-In a safe place Tom examined two large needles which were thrust into
-the lapels of his jacket, and had thread bound about them--one needle
-carried white thread and the other black. He said:
-
-"She'd never noticed if it hadn't been for Sid. Confound it! sometimes
-she sews it with white, and sometimes she sews it with black. I wish to
-geeminy she'd stick to one or t'other--I can't keep the run of 'em. But
-I bet you I'll lam Sid for that. I'll learn him!"
-
-He was not the Model Boy of the village. He knew the model boy very
-well though--and loathed him.
-
-Within two minutes, or even less, he had forgotten all his troubles.
-Not because his troubles were one whit less heavy and bitter to him
-than a man's are to a man, but because a new and powerful interest bore
-them down and drove them out of his mind for the time--just as men's
-misfortunes are forgotten in the excitement of new enterprises. This
-new interest was a valued novelty in whistling, which he had just
-acquired from a negro, and he was suffering to practise it undisturbed.
-It consisted in a peculiar bird-like turn, a sort of liquid warble,
-produced by touching the tongue to the roof of the mouth at short
-intervals in the midst of the music--the reader probably remembers how
-to do it, if he has ever been a boy. Diligence and attention soon gave
-him the knack of it, and he strode down the street with his mouth full
-of harmony and his soul full of gratitude. He felt much as an
-astronomer feels who has discovered a new planet--no doubt, as far as
-strong, deep, unalloyed pleasure is concerned, the advantage was with
-the boy, not the astronomer.
-
-The summer evenings were long. It was not dark, yet. Presently Tom
-checked his whistle. A stranger was before him--a boy a shade larger
-than himself. A new-comer of any age or either sex was an impressive
-curiosity in the poor little shabby village of St. Petersburg. This boy
-was well dressed, too--well dressed on a week-day. This was simply
-astounding. His cap was a dainty thing, his close-buttoned blue cloth
-roundabout was new and natty, and so were his pantaloons. He had shoes
-on--and it was only Friday. He even wore a necktie, a bright bit of
-ribbon. He had a citified air about him that ate into Tom's vitals. The
-more Tom stared at the splendid marvel, the higher he turned up his
-nose at his finery and the shabbier and shabbier his own outfit seemed
-to him to grow. Neither boy spoke. If one moved, the other moved--but
-only sidewise, in a circle; they kept face to face and eye to eye all
-the time. Finally Tom said:
-
-"I can lick you!"
-
-"I'd like to see you try it."
-
-"Well, I can do it."
-
-"No you can't, either."
-
-"Yes I can."
-
-"No you can't."
-
-"I can."
-
-"You can't."
-
-"Can!"
-
-"Can't!"
-
-An uncomfortable pause. Then Tom said:
-
-"What's your name?"
-
-"'Tisn't any of your business, maybe."
-
-"Well I 'low I'll MAKE it my business."
-
-"Well why don't you?"
-
-"If you say much, I will."
-
-"Much--much--MUCH. There now."
-
-"Oh, you think you're mighty smart, DON'T you? I could lick you with
-one hand tied behind me, if I wanted to."
-
-"Well why don't you DO it? You SAY you can do it."
-
-"Well I WILL, if you fool with me."
-
-"Oh yes--I've seen whole families in the same fix."
-
-"Smarty! You think you're SOME, now, DON'T you? Oh, what a hat!"
-
-"You can lump that hat if you don't like it. I dare you to knock it
-off--and anybody that'll take a dare will suck eggs."
-
-"You're a liar!"
-
-"You're another."
-
-"You're a fighting liar and dasn't take it up."
-
-"Aw--take a walk!"
-
-"Say--if you give me much more of your sass I'll take and bounce a
-rock off'n your head."
-
-"Oh, of COURSE you will."
-
-"Well I WILL."
-
-"Well why don't you DO it then? What do you keep SAYING you will for?
-Why don't you DO it? It's because you're afraid."
-
-"I AIN'T afraid."
-
-"You are."
-
-"I ain't."
-
-"You are."
-
-Another pause, and more eying and sidling around each other. Presently
-they were shoulder to shoulder. Tom said:
-
-"Get away from here!"
-
-"Go away yourself!"
-
-"I won't."
-
-"I won't either."
-
-So they stood, each with a foot placed at an angle as a brace, and
-both shoving with might and main, and glowering at each other with
-hate. But neither could get an advantage. After struggling till both
-were hot and flushed, each relaxed his strain with watchful caution,
-and Tom said:
-
-"You're a coward and a pup. I'll tell my big brother on you, and he
-can thrash you with his little finger, and I'll make him do it, too."
-
-"What do I care for your big brother? I've got a brother that's bigger
-than he is--and what's more, he can throw him over that fence, too."
-[Both brothers were imaginary.]
-
-"That's a lie."
-
-"YOUR saying so don't make it so."
-
-Tom drew a line in the dust with his big toe, and said:
-
-"I dare you to step over that, and I'll lick you till you can't stand
-up. Anybody that'll take a dare will steal sheep."
-
-The new boy stepped over promptly, and said:
-
-"Now you said you'd do it, now let's see you do it."
-
-"Don't you crowd me now; you better look out."
-
-"Well, you SAID you'd do it--why don't you do it?"
-
-"By jingo! for two cents I WILL do it."
-
-The new boy took two broad coppers out of his pocket and held them out
-with derision. Tom struck them to the ground. In an instant both boys
-were rolling and tumbling in the dirt, gripped together like cats; and
-for the space of a minute they tugged and tore at each other's hair and
-clothes, punched and scratched each other's nose, and covered
-themselves with dust and glory. Presently the confusion took form, and
-through the fog of battle Tom appeared, seated astride the new boy, and
-pounding him with his fists. "Holler 'nuff!" said he.
-
-The boy only struggled to free himself. He was crying--mainly from rage.
-
-"Holler 'nuff!"--and the pounding went on.
-
-At last the stranger got out a smothered "'Nuff!" and Tom let him up
-and said:
-
-"Now that'll learn you. Better look out who you're fooling with next
-time."
-
-The new boy went off brushing the dust from his clothes, sobbing,
-snuffling, and occasionally looking back and shaking his head and
-threatening what he would do to Tom the "next time he caught him out."
-To which Tom responded with jeers, and started off in high feather, and
-as soon as his back was turned the new boy snatched up a stone, threw
-it and hit him between the shoulders and then turned tail and ran like
-an antelope. Tom chased the traitor home, and thus found out where he
-lived. He then held a position at the gate for some time, daring the
-enemy to come outside, but the enemy only made faces at him through the
-window and declined. At last the enemy's mother appeared, and called
-Tom a bad, vicious, vulgar child, and ordered him away. So he went
-away; but he said he "'lowed" to "lay" for that boy.
-
-He got home pretty late that night, and when he climbed cautiously in
-at the window, he uncovered an ambuscade, in the person of his aunt;
-and when she saw the state his clothes were in her resolution to turn
-his Saturday holiday into captivity at hard labor became adamantine in
-its firmness.
diff --git a/vendor/src/github.com/golang/snappy/testdata/Mark.Twain-Tom.Sawyer.txt.rawsnappy b/vendor/src/github.com/golang/snappy/testdata/Mark.Twain-Tom.Sawyer.txt.rawsnappy
deleted file mode 100644
index 9c56d98..0000000
Binary files a/vendor/src/github.com/golang/snappy/testdata/Mark.Twain-Tom.Sawyer.txt.rawsnappy and /dev/null differ
diff --git a/vendor/src/github.com/google/btree/LICENSE b/vendor/src/github.com/google/btree/LICENSE
deleted file mode 100644
index d645695..0000000
--- a/vendor/src/github.com/google/btree/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/src/github.com/google/btree/README.md b/vendor/src/github.com/google/btree/README.md
deleted file mode 100644
index 6062a4d..0000000
--- a/vendor/src/github.com/google/btree/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
-# BTree implementation for Go
-
-![Travis CI Build Status](https://api.travis-ci.org/google/btree.svg?branch=master)
-
-This package provides an in-memory B-Tree implementation for Go, useful as
-an ordered, mutable data structure.
-
-The API is based off of the wonderful
-http://godoc.org/github.com/petar/GoLLRB/llrb, and is meant to allow btree to
-act as a drop-in replacement for gollrb trees.
-
-See http://godoc.org/github.com/google/btree for documentation.
diff --git a/vendor/src/github.com/google/btree/btree.go b/vendor/src/github.com/google/btree/btree.go
deleted file mode 100644
index 6088670..0000000
--- a/vendor/src/github.com/google/btree/btree.go
+++ /dev/null
@@ -1,738 +0,0 @@
-// Copyright 2014 Google Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package btree implements in-memory B-Trees of arbitrary degree.
-//
-// btree implements an in-memory B-Tree for use as an ordered data structure.
-// It is not meant for persistent storage solutions.
-//
-// It has a flatter structure than an equivalent red-black or other binary tree,
-// which in some cases yields better memory usage and/or performance.
-// See some discussion on the matter here:
-// http://google-opensource.blogspot.com/2013/01/c-containers-that-save-memory-and-time.html
-// Note, though, that this project is in no way related to the C++ B-Tree
-// implementation written about there.
-//
-// Within this tree, each node contains a slice of items and a (possibly nil)
-// slice of children. For basic numeric values or raw structs, this can cause
-// efficiency differences when compared to equivalent C++ template code that
-// stores values in arrays within the node:
-// * Due to the overhead of storing values as interfaces (each
-// value needs to be stored as the value itself, then 2 words for the
-// interface pointing to that value and its type), resulting in higher
-// memory use.
-// * Since interfaces can point to values anywhere in memory, values are
-// most likely not stored in contiguous blocks, resulting in a higher
-// number of cache misses.
-// These issues don't tend to matter, though, when working with strings or other
-// heap-allocated structures, since C++-equivalent structures also must store
-// pointers and also distribute their values across the heap.
-//
-// This implementation is designed to be a drop-in replacement to gollrb.LLRB
-// trees, (http://github.com/petar/gollrb), an excellent and probably the most
-// widely used ordered tree implementation in the Go ecosystem currently.
-// Its functions, therefore, exactly mirror those of
-// llrb.LLRB where possible. Unlike gollrb, though, we currently don't
-// support storing multiple equivalent values.
-package btree
-
-import (
- "fmt"
- "io"
- "sort"
- "strings"
-)
-
-// Item represents a single object in the tree.
-type Item interface {
- // Less tests whether the current item is less than the given argument.
- //
- // This must provide a strict weak ordering.
- // If !a.Less(b) && !b.Less(a), we treat this to mean a == b (i.e. we can only
- // hold one of either a or b in the tree).
- Less(than Item) bool
-}
-
-const (
- DefaultFreeListSize = 32
-)
-
-var (
- nilItems = make(items, 16)
- nilChildren = make(children, 16)
-)
-
-// FreeList represents a free list of btree nodes. By default each
-// BTree has its own FreeList, but multiple BTrees can share the same
-// FreeList.
-// Two Btrees using the same freelist are not safe for concurrent write access.
-type FreeList struct {
- freelist []*node
-}
-
-// NewFreeList creates a new free list.
-// size is the maximum size of the returned free list.
-func NewFreeList(size int) *FreeList {
- return &FreeList{freelist: make([]*node, 0, size)}
-}
-
-func (f *FreeList) newNode() (n *node) {
- index := len(f.freelist) - 1
- if index < 0 {
- return new(node)
- }
- n = f.freelist[index]
- f.freelist[index] = nil
- f.freelist = f.freelist[:index]
- return
-}
-
-func (f *FreeList) freeNode(n *node) {
- if len(f.freelist) < cap(f.freelist) {
- f.freelist = append(f.freelist, n)
- }
-}
-
-// ItemIterator allows callers of Ascend* to iterate in-order over portions of
-// the tree. When this function returns false, iteration will stop and the
-// associated Ascend* function will immediately return.
-type ItemIterator func(i Item) bool
-
-// New creates a new B-Tree with the given degree.
-//
-// New(2), for example, will create a 2-3-4 tree (each node contains 1-3 items
-// and 2-4 children).
-func New(degree int) *BTree {
- return NewWithFreeList(degree, NewFreeList(DefaultFreeListSize))
-}
-
-// NewWithFreeList creates a new B-Tree that uses the given node free list.
-func NewWithFreeList(degree int, f *FreeList) *BTree {
- if degree <= 1 {
- panic("bad degree")
- }
- return &BTree{
- degree: degree,
- freelist: f,
- }
-}
-
-// items stores items in a node.
-type items []Item
-
-// insertAt inserts a value into the given index, pushing all subsequent values
-// forward.
-func (s *items) insertAt(index int, item Item) {
- *s = append(*s, nil)
- if index < len(*s) {
- copy((*s)[index+1:], (*s)[index:])
- }
- (*s)[index] = item
-}
-
-// removeAt removes a value at a given index, pulling all subsequent values
-// back.
-func (s *items) removeAt(index int) Item {
- item := (*s)[index]
- copy((*s)[index:], (*s)[index+1:])
- (*s)[len(*s)-1] = nil
- *s = (*s)[:len(*s)-1]
- return item
-}
-
-// pop removes and returns the last element in the list.
-func (s *items) pop() (out Item) {
- index := len(*s) - 1
- out = (*s)[index]
- (*s)[index] = nil
- *s = (*s)[:index]
- return
-}
-
-// truncate truncates this instance at index so that it contains only the
-// first index items. index must be less than or equal to length.
-func (s *items) truncate(index int) {
- var toClear items
- *s, toClear = (*s)[:index], (*s)[index:]
- for len(toClear) > 0 {
- toClear = toClear[copy(toClear, nilItems):]
- }
-}
-
-// find returns the index where the given item should be inserted into this
-// list. 'found' is true if the item already exists in the list at the given
-// index.
-func (s items) find(item Item) (index int, found bool) {
- i := sort.Search(len(s), func(i int) bool {
- return item.Less(s[i])
- })
- if i > 0 && !s[i-1].Less(item) {
- return i - 1, true
- }
- return i, false
-}
-
-// children stores child nodes in a node.
-type children []*node
-
-// insertAt inserts a value into the given index, pushing all subsequent values
-// forward.
-func (s *children) insertAt(index int, n *node) {
- *s = append(*s, nil)
- if index < len(*s) {
- copy((*s)[index+1:], (*s)[index:])
- }
- (*s)[index] = n
-}
-
-// removeAt removes a value at a given index, pulling all subsequent values
-// back.
-func (s *children) removeAt(index int) *node {
- n := (*s)[index]
- copy((*s)[index:], (*s)[index+1:])
- (*s)[len(*s)-1] = nil
- *s = (*s)[:len(*s)-1]
- return n
-}
-
-// pop removes and returns the last element in the list.
-func (s *children) pop() (out *node) {
- index := len(*s) - 1
- out = (*s)[index]
- (*s)[index] = nil
- *s = (*s)[:index]
- return
-}
-
-// truncate truncates this instance at index so that it contains only the
-// first index children. index must be less than or equal to length.
-func (s *children) truncate(index int) {
- var toClear children
- *s, toClear = (*s)[:index], (*s)[index:]
- for len(toClear) > 0 {
- toClear = toClear[copy(toClear, nilChildren):]
- }
-}
-
-// node is an internal node in a tree.
-//
-// It must at all times maintain the invariant that either
-// * len(children) == 0, len(items) unconstrained
-// * len(children) == len(items) + 1
-type node struct {
- items items
- children children
- t *BTree
-}
-
-// split splits the given node at the given index. The current node shrinks,
-// and this function returns the item that existed at that index and a new node
-// containing all items/children after it.
-func (n *node) split(i int) (Item, *node) {
- item := n.items[i]
- next := n.t.newNode()
- next.items = append(next.items, n.items[i+1:]...)
- n.items.truncate(i)
- if len(n.children) > 0 {
- next.children = append(next.children, n.children[i+1:]...)
- n.children.truncate(i + 1)
- }
- return item, next
-}
-
-// maybeSplitChild checks if a child should be split, and if so splits it.
-// Returns whether or not a split occurred.
-func (n *node) maybeSplitChild(i, maxItems int) bool {
- if len(n.children[i].items) < maxItems {
- return false
- }
- first := n.children[i]
- item, second := first.split(maxItems / 2)
- n.items.insertAt(i, item)
- n.children.insertAt(i+1, second)
- return true
-}
-
-// insert inserts an item into the subtree rooted at this node, making sure
-// no nodes in the subtree exceed maxItems items. Should an equivalent item be
-// be found/replaced by insert, it will be returned.
-func (n *node) insert(item Item, maxItems int) Item {
- i, found := n.items.find(item)
- if found {
- out := n.items[i]
- n.items[i] = item
- return out
- }
- if len(n.children) == 0 {
- n.items.insertAt(i, item)
- return nil
- }
- if n.maybeSplitChild(i, maxItems) {
- inTree := n.items[i]
- switch {
- case item.Less(inTree):
- // no change, we want first split node
- case inTree.Less(item):
- i++ // we want second split node
- default:
- out := n.items[i]
- n.items[i] = item
- return out
- }
- }
- return n.children[i].insert(item, maxItems)
-}
-
-// get finds the given key in the subtree and returns it.
-func (n *node) get(key Item) Item {
- i, found := n.items.find(key)
- if found {
- return n.items[i]
- } else if len(n.children) > 0 {
- return n.children[i].get(key)
- }
- return nil
-}
-
-// min returns the first item in the subtree.
-func min(n *node) Item {
- if n == nil {
- return nil
- }
- for len(n.children) > 0 {
- n = n.children[0]
- }
- if len(n.items) == 0 {
- return nil
- }
- return n.items[0]
-}
-
-// max returns the last item in the subtree.
-func max(n *node) Item {
- if n == nil {
- return nil
- }
- for len(n.children) > 0 {
- n = n.children[len(n.children)-1]
- }
- if len(n.items) == 0 {
- return nil
- }
- return n.items[len(n.items)-1]
-}
-
-// toRemove details what item to remove in a node.remove call.
-type toRemove int
-
-const (
- removeItem toRemove = iota // removes the given item
- removeMin // removes smallest item in the subtree
- removeMax // removes largest item in the subtree
-)
-
-// remove removes an item from the subtree rooted at this node.
-func (n *node) remove(item Item, minItems int, typ toRemove) Item {
- var i int
- var found bool
- switch typ {
- case removeMax:
- if len(n.children) == 0 {
- return n.items.pop()
- }
- i = len(n.items)
- case removeMin:
- if len(n.children) == 0 {
- return n.items.removeAt(0)
- }
- i = 0
- case removeItem:
- i, found = n.items.find(item)
- if len(n.children) == 0 {
- if found {
- return n.items.removeAt(i)
- }
- return nil
- }
- default:
- panic("invalid type")
- }
- // If we get to here, we have children.
- child := n.children[i]
- if len(child.items) <= minItems {
- return n.growChildAndRemove(i, item, minItems, typ)
- }
- // Either we had enough items to begin with, or we've done some
- // merging/stealing, because we've got enough now and we're ready to return
- // stuff.
- if found {
- // The item exists at index 'i', and the child we've selected can give us a
- // predecessor, since if we've gotten here it's got > minItems items in it.
- out := n.items[i]
- // We use our special-case 'remove' call with typ=maxItem to pull the
- // predecessor of item i (the rightmost leaf of our immediate left child)
- // and set it into where we pulled the item from.
- n.items[i] = child.remove(nil, minItems, removeMax)
- return out
- }
- // Final recursive call. Once we're here, we know that the item isn't in this
- // node and that the child is big enough to remove from.
- return child.remove(item, minItems, typ)
-}
-
-// growChildAndRemove grows child 'i' to make sure it's possible to remove an
-// item from it while keeping it at minItems, then calls remove to actually
-// remove it.
-//
-// Most documentation says we have to do two sets of special casing:
-// 1) item is in this node
-// 2) item is in child
-// In both cases, we need to handle the two subcases:
-// A) node has enough values that it can spare one
-// B) node doesn't have enough values
-// For the latter, we have to check:
-// a) left sibling has node to spare
-// b) right sibling has node to spare
-// c) we must merge
-// To simplify our code here, we handle cases #1 and #2 the same:
-// If a node doesn't have enough items, we make sure it does (using a,b,c).
-// We then simply redo our remove call, and the second time (regardless of
-// whether we're in case 1 or 2), we'll have enough items and can guarantee
-// that we hit case A.
-func (n *node) growChildAndRemove(i int, item Item, minItems int, typ toRemove) Item {
- child := n.children[i]
- if i > 0 && len(n.children[i-1].items) > minItems {
- // Steal from left child
- stealFrom := n.children[i-1]
- stolenItem := stealFrom.items.pop()
- child.items.insertAt(0, n.items[i-1])
- n.items[i-1] = stolenItem
- if len(stealFrom.children) > 0 {
- child.children.insertAt(0, stealFrom.children.pop())
- }
- } else if i < len(n.items) && len(n.children[i+1].items) > minItems {
- // steal from right child
- stealFrom := n.children[i+1]
- stolenItem := stealFrom.items.removeAt(0)
- child.items = append(child.items, n.items[i])
- n.items[i] = stolenItem
- if len(stealFrom.children) > 0 {
- child.children = append(child.children, stealFrom.children.removeAt(0))
- }
- } else {
- if i >= len(n.items) {
- i--
- child = n.children[i]
- }
- // merge with right child
- mergeItem := n.items.removeAt(i)
- mergeChild := n.children.removeAt(i + 1)
- child.items = append(child.items, mergeItem)
- child.items = append(child.items, mergeChild.items...)
- child.children = append(child.children, mergeChild.children...)
- n.t.freeNode(mergeChild)
- }
- return n.remove(item, minItems, typ)
-}
-
-type direction int
-
-const (
- descend = direction(-1)
- ascend = direction(+1)
-)
-
-// iterate provides a simple method for iterating over elements in the tree.
-//
-// When ascending, the 'start' should be less than 'stop' and when descending,
-// the 'start' should be greater than 'stop'. Setting 'includeStart' to true
-// will force the iterator to include the first item when it equals 'start',
-// thus creating a "greaterOrEqual" or "lessThanEqual" rather than just a
-// "greaterThan" or "lessThan" queries.
-func (n *node) iterate(dir direction, start, stop Item, includeStart bool, hit bool, iter ItemIterator) (bool, bool) {
- var ok bool
- switch dir {
- case ascend:
- for i := 0; i < len(n.items); i++ {
- if start != nil && n.items[i].Less(start) {
- continue
- }
- if len(n.children) > 0 {
- if hit, ok = n.children[i].iterate(dir, start, stop, includeStart, hit, iter); !ok {
- return hit, false
- }
- }
- if !includeStart && !hit && start != nil && !start.Less(n.items[i]) {
- hit = true
- continue
- }
- hit = true
- if stop != nil && !n.items[i].Less(stop) {
- return hit, false
- }
- if !iter(n.items[i]) {
- return hit, false
- }
- }
- if len(n.children) > 0 {
- if hit, ok = n.children[len(n.children)-1].iterate(dir, start, stop, includeStart, hit, iter); !ok {
- return hit, false
- }
- }
- case descend:
- for i := len(n.items) - 1; i >= 0; i-- {
- if start != nil && !n.items[i].Less(start) {
- if !includeStart || hit || start.Less(n.items[i]) {
- continue
- }
- }
- if len(n.children) > 0 {
- if hit, ok = n.children[i+1].iterate(dir, start, stop, includeStart, hit, iter); !ok {
- return hit, false
- }
- }
- if stop != nil && !stop.Less(n.items[i]) {
- return hit, false // continue
- }
- hit = true
- if !iter(n.items[i]) {
- return hit, false
- }
- }
- if len(n.children) > 0 {
- if hit, ok = n.children[0].iterate(dir, start, stop, includeStart, hit, iter); !ok {
- return hit, false
- }
- }
- }
- return hit, true
-}
-
-// Used for testing/debugging purposes.
-func (n *node) print(w io.Writer, level int) {
- fmt.Fprintf(w, "%sNODE:%v\n", strings.Repeat(" ", level), n.items)
- for _, c := range n.children {
- c.print(w, level+1)
- }
-}
-
-// BTree is an implementation of a B-Tree.
-//
-// BTree stores Item instances in an ordered structure, allowing easy insertion,
-// removal, and iteration.
-//
-// Write operations are not safe for concurrent mutation by multiple
-// goroutines, but Read operations are.
-type BTree struct {
- degree int
- length int
- root *node
- freelist *FreeList
-}
-
-// maxItems returns the max number of items to allow per node.
-func (t *BTree) maxItems() int {
- return t.degree*2 - 1
-}
-
-// minItems returns the min number of items to allow per node (ignored for the
-// root node).
-func (t *BTree) minItems() int {
- return t.degree - 1
-}
-
-func (t *BTree) newNode() (n *node) {
- n = t.freelist.newNode()
- n.t = t
- return
-}
-
-func (t *BTree) freeNode(n *node) {
- // clear to allow GC
- n.items.truncate(0)
- n.children.truncate(0)
- n.t = nil // clear to allow GC
- t.freelist.freeNode(n)
-}
-
-// ReplaceOrInsert adds the given item to the tree. If an item in the tree
-// already equals the given one, it is removed from the tree and returned.
-// Otherwise, nil is returned.
-//
-// nil cannot be added to the tree (will panic).
-func (t *BTree) ReplaceOrInsert(item Item) Item {
- if item == nil {
- panic("nil item being added to BTree")
- }
- if t.root == nil {
- t.root = t.newNode()
- t.root.items = append(t.root.items, item)
- t.length++
- return nil
- } else if len(t.root.items) >= t.maxItems() {
- item2, second := t.root.split(t.maxItems() / 2)
- oldroot := t.root
- t.root = t.newNode()
- t.root.items = append(t.root.items, item2)
- t.root.children = append(t.root.children, oldroot, second)
- }
- out := t.root.insert(item, t.maxItems())
- if out == nil {
- t.length++
- }
- return out
-}
-
-// Delete removes an item equal to the passed in item from the tree, returning
-// it. If no such item exists, returns nil.
-func (t *BTree) Delete(item Item) Item {
- return t.deleteItem(item, removeItem)
-}
-
-// DeleteMin removes the smallest item in the tree and returns it.
-// If no such item exists, returns nil.
-func (t *BTree) DeleteMin() Item {
- return t.deleteItem(nil, removeMin)
-}
-
-// DeleteMax removes the largest item in the tree and returns it.
-// If no such item exists, returns nil.
-func (t *BTree) DeleteMax() Item {
- return t.deleteItem(nil, removeMax)
-}
-
-func (t *BTree) deleteItem(item Item, typ toRemove) Item {
- if t.root == nil || len(t.root.items) == 0 {
- return nil
- }
- out := t.root.remove(item, t.minItems(), typ)
- if len(t.root.items) == 0 && len(t.root.children) > 0 {
- oldroot := t.root
- t.root = t.root.children[0]
- t.freeNode(oldroot)
- }
- if out != nil {
- t.length--
- }
- return out
-}
-
-// AscendRange calls the iterator for every value in the tree within the range
-// [greaterOrEqual, lessThan), until iterator returns false.
-func (t *BTree) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) {
- if t.root == nil {
- return
- }
- t.root.iterate(ascend, greaterOrEqual, lessThan, true, false, iterator)
-}
-
-// AscendLessThan calls the iterator for every value in the tree within the range
-// [first, pivot), until iterator returns false.
-func (t *BTree) AscendLessThan(pivot Item, iterator ItemIterator) {
- if t.root == nil {
- return
- }
- t.root.iterate(ascend, nil, pivot, false, false, iterator)
-}
-
-// AscendGreaterOrEqual calls the iterator for every value in the tree within
-// the range [pivot, last], until iterator returns false.
-func (t *BTree) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) {
- if t.root == nil {
- return
- }
- t.root.iterate(ascend, pivot, nil, true, false, iterator)
-}
-
-// Ascend calls the iterator for every value in the tree within the range
-// [first, last], until iterator returns false.
-func (t *BTree) Ascend(iterator ItemIterator) {
- if t.root == nil {
- return
- }
- t.root.iterate(ascend, nil, nil, false, false, iterator)
-}
-
-// DescendRange calls the iterator for every value in the tree within the range
-// [lessOrEqual, greaterThan), until iterator returns false.
-func (t *BTree) DescendRange(lessOrEqual, greaterThan Item, iterator ItemIterator) {
- if t.root == nil {
- return
- }
- t.root.iterate(descend, lessOrEqual, greaterThan, true, false, iterator)
-}
-
-// DescendLessOrEqual calls the iterator for every value in the tree within the range
-// [pivot, first], until iterator returns false.
-func (t *BTree) DescendLessOrEqual(pivot Item, iterator ItemIterator) {
- if t.root == nil {
- return
- }
- t.root.iterate(descend, pivot, nil, true, false, iterator)
-}
-
-// DescendGreaterThan calls the iterator for every value in the tree within
-// the range (pivot, last], until iterator returns false.
-func (t *BTree) DescendGreaterThan(pivot Item, iterator ItemIterator) {
- if t.root == nil {
- return
- }
- t.root.iterate(descend, nil, pivot, false, false, iterator)
-}
-
-// Descend calls the iterator for every value in the tree within the range
-// [last, first], until iterator returns false.
-func (t *BTree) Descend(iterator ItemIterator) {
- if t.root == nil {
- return
- }
- t.root.iterate(descend, nil, nil, false, false, iterator)
-}
-
-// Get looks for the key item in the tree, returning it. It returns nil if
-// unable to find that item.
-func (t *BTree) Get(key Item) Item {
- if t.root == nil {
- return nil
- }
- return t.root.get(key)
-}
-
-// Min returns the smallest item in the tree, or nil if the tree is empty.
-func (t *BTree) Min() Item {
- return min(t.root)
-}
-
-// Max returns the largest item in the tree, or nil if the tree is empty.
-func (t *BTree) Max() Item {
- return max(t.root)
-}
-
-// Has returns true if the given key is in the tree.
-func (t *BTree) Has(key Item) bool {
- return t.Get(key) != nil
-}
-
-// Len returns the number of items currently in the tree.
-func (t *BTree) Len() int {
- return t.length
-}
-
-// Int implements the Item interface for integers.
-type Int int
-
-// Less returns true if int(a) < int(b).
-func (a Int) Less(b Item) bool {
- return a < b.(Int)
-}
diff --git a/vendor/src/github.com/google/btree/btree_mem.go b/vendor/src/github.com/google/btree/btree_mem.go
deleted file mode 100644
index cb95b7f..0000000
--- a/vendor/src/github.com/google/btree/btree_mem.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2014 Google Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build ignore
-
-// This binary compares memory usage between btree and gollrb.
-package main
-
-import (
- "flag"
- "fmt"
- "math/rand"
- "runtime"
- "time"
-
- "github.com/google/btree"
- "github.com/petar/GoLLRB/llrb"
-)
-
-var (
- size = flag.Int("size", 1000000, "size of the tree to build")
- degree = flag.Int("degree", 8, "degree of btree")
- gollrb = flag.Bool("llrb", false, "use llrb instead of btree")
-)
-
-func main() {
- flag.Parse()
- vals := rand.Perm(*size)
- var t, v interface{}
- v = vals
- var stats runtime.MemStats
- for i := 0; i < 10; i++ {
- runtime.GC()
- }
- fmt.Println("-------- BEFORE ----------")
- runtime.ReadMemStats(&stats)
- fmt.Printf("%+v\n", stats)
- start := time.Now()
- if *gollrb {
- tr := llrb.New()
- for _, v := range vals {
- tr.ReplaceOrInsert(llrb.Int(v))
- }
- t = tr // keep it around
- } else {
- tr := btree.New(*degree)
- for _, v := range vals {
- tr.ReplaceOrInsert(btree.Int(v))
- }
- t = tr // keep it around
- }
- fmt.Printf("%v inserts in %v\n", *size, time.Since(start))
- fmt.Println("-------- AFTER ----------")
- runtime.ReadMemStats(&stats)
- fmt.Printf("%+v\n", stats)
- for i := 0; i < 10; i++ {
- runtime.GC()
- }
- fmt.Println("-------- AFTER GC ----------")
- runtime.ReadMemStats(&stats)
- fmt.Printf("%+v\n", stats)
- if t == v {
- fmt.Println("to make sure vals and tree aren't GC'd")
- }
-}
diff --git a/vendor/src/github.com/google/btree/btree_test.go b/vendor/src/github.com/google/btree/btree_test.go
deleted file mode 100644
index f01394a..0000000
--- a/vendor/src/github.com/google/btree/btree_test.go
+++ /dev/null
@@ -1,563 +0,0 @@
-// Copyright 2014 Google Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package btree
-
-import (
- "flag"
- "fmt"
- "math/rand"
- "reflect"
- "sort"
- "testing"
- "time"
-)
-
-func init() {
- seed := time.Now().Unix()
- fmt.Println(seed)
- rand.Seed(seed)
-}
-
-// perm returns a random permutation of n Int items in the range [0, n).
-func perm(n int) (out []Item) {
- for _, v := range rand.Perm(n) {
- out = append(out, Int(v))
- }
- return
-}
-
-// rang returns an ordered list of Int items in the range [0, n).
-func rang(n int) (out []Item) {
- for i := 0; i < n; i++ {
- out = append(out, Int(i))
- }
- return
-}
-
-// all extracts all items from a tree in order as a slice.
-func all(t *BTree) (out []Item) {
- t.Ascend(func(a Item) bool {
- out = append(out, a)
- return true
- })
- return
-}
-
-// rangerev returns a reversed ordered list of Int items in the range [0, n).
-func rangrev(n int) (out []Item) {
- for i := n - 1; i >= 0; i-- {
- out = append(out, Int(i))
- }
- return
-}
-
-// allrev extracts all items from a tree in reverse order as a slice.
-func allrev(t *BTree) (out []Item) {
- t.Descend(func(a Item) bool {
- out = append(out, a)
- return true
- })
- return
-}
-
-var btreeDegree = flag.Int("degree", 32, "B-Tree degree")
-
-func TestBTree(t *testing.T) {
- tr := New(*btreeDegree)
- const treeSize = 10000
- for i := 0; i < 10; i++ {
- if min := tr.Min(); min != nil {
- t.Fatalf("empty min, got %+v", min)
- }
- if max := tr.Max(); max != nil {
- t.Fatalf("empty max, got %+v", max)
- }
- for _, item := range perm(treeSize) {
- if x := tr.ReplaceOrInsert(item); x != nil {
- t.Fatal("insert found item", item)
- }
- }
- for _, item := range perm(treeSize) {
- if x := tr.ReplaceOrInsert(item); x == nil {
- t.Fatal("insert didn't find item", item)
- }
- }
- if min, want := tr.Min(), Item(Int(0)); min != want {
- t.Fatalf("min: want %+v, got %+v", want, min)
- }
- if max, want := tr.Max(), Item(Int(treeSize-1)); max != want {
- t.Fatalf("max: want %+v, got %+v", want, max)
- }
- got := all(tr)
- want := rang(treeSize)
- if !reflect.DeepEqual(got, want) {
- t.Fatalf("mismatch:\n got: %v\nwant: %v", got, want)
- }
-
- gotrev := allrev(tr)
- wantrev := rangrev(treeSize)
- if !reflect.DeepEqual(gotrev, wantrev) {
- t.Fatalf("mismatch:\n got: %v\nwant: %v", got, want)
- }
-
- for _, item := range perm(treeSize) {
- if x := tr.Delete(item); x == nil {
- t.Fatalf("didn't find %v", item)
- }
- }
- if got = all(tr); len(got) > 0 {
- t.Fatalf("some left!: %v", got)
- }
- }
-}
-
-func ExampleBTree() {
- tr := New(*btreeDegree)
- for i := Int(0); i < 10; i++ {
- tr.ReplaceOrInsert(i)
- }
- fmt.Println("len: ", tr.Len())
- fmt.Println("get3: ", tr.Get(Int(3)))
- fmt.Println("get100: ", tr.Get(Int(100)))
- fmt.Println("del4: ", tr.Delete(Int(4)))
- fmt.Println("del100: ", tr.Delete(Int(100)))
- fmt.Println("replace5: ", tr.ReplaceOrInsert(Int(5)))
- fmt.Println("replace100:", tr.ReplaceOrInsert(Int(100)))
- fmt.Println("min: ", tr.Min())
- fmt.Println("delmin: ", tr.DeleteMin())
- fmt.Println("max: ", tr.Max())
- fmt.Println("delmax: ", tr.DeleteMax())
- fmt.Println("len: ", tr.Len())
- // Output:
- // len: 10
- // get3: 3
- // get100:
- // del4: 4
- // del100:
- // replace5: 5
- // replace100:
- // min: 0
- // delmin: 0
- // max: 100
- // delmax: 100
- // len: 8
-}
-
-func TestDeleteMin(t *testing.T) {
- tr := New(3)
- for _, v := range perm(100) {
- tr.ReplaceOrInsert(v)
- }
- var got []Item
- for v := tr.DeleteMin(); v != nil; v = tr.DeleteMin() {
- got = append(got, v)
- }
- if want := rang(100); !reflect.DeepEqual(got, want) {
- t.Fatalf("ascendrange:\n got: %v\nwant: %v", got, want)
- }
-}
-
-func TestDeleteMax(t *testing.T) {
- tr := New(3)
- for _, v := range perm(100) {
- tr.ReplaceOrInsert(v)
- }
- var got []Item
- for v := tr.DeleteMax(); v != nil; v = tr.DeleteMax() {
- got = append(got, v)
- }
- // Reverse our list.
- for i := 0; i < len(got)/2; i++ {
- got[i], got[len(got)-i-1] = got[len(got)-i-1], got[i]
- }
- if want := rang(100); !reflect.DeepEqual(got, want) {
- t.Fatalf("ascendrange:\n got: %v\nwant: %v", got, want)
- }
-}
-
-func TestAscendRange(t *testing.T) {
- tr := New(2)
- for _, v := range perm(100) {
- tr.ReplaceOrInsert(v)
- }
- var got []Item
- tr.AscendRange(Int(40), Int(60), func(a Item) bool {
- got = append(got, a)
- return true
- })
- if want := rang(100)[40:60]; !reflect.DeepEqual(got, want) {
- t.Fatalf("ascendrange:\n got: %v\nwant: %v", got, want)
- }
- got = got[:0]
- tr.AscendRange(Int(40), Int(60), func(a Item) bool {
- if a.(Int) > 50 {
- return false
- }
- got = append(got, a)
- return true
- })
- if want := rang(100)[40:51]; !reflect.DeepEqual(got, want) {
- t.Fatalf("ascendrange:\n got: %v\nwant: %v", got, want)
- }
-}
-
-func TestDescendRange(t *testing.T) {
- tr := New(2)
- for _, v := range perm(100) {
- tr.ReplaceOrInsert(v)
- }
- var got []Item
- tr.DescendRange(Int(60), Int(40), func(a Item) bool {
- got = append(got, a)
- return true
- })
- if want := rangrev(100)[39:59]; !reflect.DeepEqual(got, want) {
- t.Fatalf("descendrange:\n got: %v\nwant: %v", got, want)
- }
- got = got[:0]
- tr.DescendRange(Int(60), Int(40), func(a Item) bool {
- if a.(Int) < 50 {
- return false
- }
- got = append(got, a)
- return true
- })
- if want := rangrev(100)[39:50]; !reflect.DeepEqual(got, want) {
- t.Fatalf("descendrange:\n got: %v\nwant: %v", got, want)
- }
-}
-func TestAscendLessThan(t *testing.T) {
- tr := New(*btreeDegree)
- for _, v := range perm(100) {
- tr.ReplaceOrInsert(v)
- }
- var got []Item
- tr.AscendLessThan(Int(60), func(a Item) bool {
- got = append(got, a)
- return true
- })
- if want := rang(100)[:60]; !reflect.DeepEqual(got, want) {
- t.Fatalf("ascendrange:\n got: %v\nwant: %v", got, want)
- }
- got = got[:0]
- tr.AscendLessThan(Int(60), func(a Item) bool {
- if a.(Int) > 50 {
- return false
- }
- got = append(got, a)
- return true
- })
- if want := rang(100)[:51]; !reflect.DeepEqual(got, want) {
- t.Fatalf("ascendrange:\n got: %v\nwant: %v", got, want)
- }
-}
-
-func TestDescendLessOrEqual(t *testing.T) {
- tr := New(*btreeDegree)
- for _, v := range perm(100) {
- tr.ReplaceOrInsert(v)
- }
- var got []Item
- tr.DescendLessOrEqual(Int(40), func(a Item) bool {
- got = append(got, a)
- return true
- })
- if want := rangrev(100)[59:]; !reflect.DeepEqual(got, want) {
- t.Fatalf("descendlessorequal:\n got: %v\nwant: %v", got, want)
- }
- got = got[:0]
- tr.DescendLessOrEqual(Int(60), func(a Item) bool {
- if a.(Int) < 50 {
- return false
- }
- got = append(got, a)
- return true
- })
- if want := rangrev(100)[39:50]; !reflect.DeepEqual(got, want) {
- t.Fatalf("descendlessorequal:\n got: %v\nwant: %v", got, want)
- }
-}
-func TestAscendGreaterOrEqual(t *testing.T) {
- tr := New(*btreeDegree)
- for _, v := range perm(100) {
- tr.ReplaceOrInsert(v)
- }
- var got []Item
- tr.AscendGreaterOrEqual(Int(40), func(a Item) bool {
- got = append(got, a)
- return true
- })
- if want := rang(100)[40:]; !reflect.DeepEqual(got, want) {
- t.Fatalf("ascendrange:\n got: %v\nwant: %v", got, want)
- }
- got = got[:0]
- tr.AscendGreaterOrEqual(Int(40), func(a Item) bool {
- if a.(Int) > 50 {
- return false
- }
- got = append(got, a)
- return true
- })
- if want := rang(100)[40:51]; !reflect.DeepEqual(got, want) {
- t.Fatalf("ascendrange:\n got: %v\nwant: %v", got, want)
- }
-}
-
-func TestDescendGreaterThan(t *testing.T) {
- tr := New(*btreeDegree)
- for _, v := range perm(100) {
- tr.ReplaceOrInsert(v)
- }
- var got []Item
- tr.DescendGreaterThan(Int(40), func(a Item) bool {
- got = append(got, a)
- return true
- })
- if want := rangrev(100)[:59]; !reflect.DeepEqual(got, want) {
- t.Fatalf("descendgreaterthan:\n got: %v\nwant: %v", got, want)
- }
- got = got[:0]
- tr.DescendGreaterThan(Int(40), func(a Item) bool {
- if a.(Int) < 50 {
- return false
- }
- got = append(got, a)
- return true
- })
- if want := rangrev(100)[:50]; !reflect.DeepEqual(got, want) {
- t.Fatalf("descendgreaterthan:\n got: %v\nwant: %v", got, want)
- }
-}
-
-const benchmarkTreeSize = 10000
-
-func BenchmarkInsert(b *testing.B) {
- b.StopTimer()
- insertP := perm(benchmarkTreeSize)
- b.StartTimer()
- i := 0
- for i < b.N {
- tr := New(*btreeDegree)
- for _, item := range insertP {
- tr.ReplaceOrInsert(item)
- i++
- if i >= b.N {
- return
- }
- }
- }
-}
-
-func BenchmarkDelete(b *testing.B) {
- b.StopTimer()
- insertP := perm(benchmarkTreeSize)
- removeP := perm(benchmarkTreeSize)
- b.StartTimer()
- i := 0
- for i < b.N {
- b.StopTimer()
- tr := New(*btreeDegree)
- for _, v := range insertP {
- tr.ReplaceOrInsert(v)
- }
- b.StartTimer()
- for _, item := range removeP {
- tr.Delete(item)
- i++
- if i >= b.N {
- return
- }
- }
- if tr.Len() > 0 {
- panic(tr.Len())
- }
- }
-}
-
-func BenchmarkGet(b *testing.B) {
- b.StopTimer()
- insertP := perm(benchmarkTreeSize)
- removeP := perm(benchmarkTreeSize)
- b.StartTimer()
- i := 0
- for i < b.N {
- b.StopTimer()
- tr := New(*btreeDegree)
- for _, v := range insertP {
- tr.ReplaceOrInsert(v)
- }
- b.StartTimer()
- for _, item := range removeP {
- tr.Get(item)
- i++
- if i >= b.N {
- return
- }
- }
- }
-}
-
-type byInts []Item
-
-func (a byInts) Len() int {
- return len(a)
-}
-
-func (a byInts) Less(i, j int) bool {
- return a[i].(Int) < a[j].(Int)
-}
-
-func (a byInts) Swap(i, j int) {
- a[i], a[j] = a[j], a[i]
-}
-
-func BenchmarkAscend(b *testing.B) {
- arr := perm(benchmarkTreeSize)
- tr := New(*btreeDegree)
- for _, v := range arr {
- tr.ReplaceOrInsert(v)
- }
- sort.Sort(byInts(arr))
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- j := 0
- tr.Ascend(func(item Item) bool {
- if item.(Int) != arr[j].(Int) {
- b.Fatalf("mismatch: expected: %v, got %v", arr[j].(Int), item.(Int))
- }
- j++
- return true
- })
- }
-}
-
-func BenchmarkDescend(b *testing.B) {
- arr := perm(benchmarkTreeSize)
- tr := New(*btreeDegree)
- for _, v := range arr {
- tr.ReplaceOrInsert(v)
- }
- sort.Sort(byInts(arr))
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- j := len(arr) - 1
- tr.Descend(func(item Item) bool {
- if item.(Int) != arr[j].(Int) {
- b.Fatalf("mismatch: expected: %v, got %v", arr[j].(Int), item.(Int))
- }
- j--
- return true
- })
- }
-}
-func BenchmarkAscendRange(b *testing.B) {
- arr := perm(benchmarkTreeSize)
- tr := New(*btreeDegree)
- for _, v := range arr {
- tr.ReplaceOrInsert(v)
- }
- sort.Sort(byInts(arr))
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- j := 100
- tr.AscendRange(Int(100), arr[len(arr)-100], func(item Item) bool {
- if item.(Int) != arr[j].(Int) {
- b.Fatalf("mismatch: expected: %v, got %v", arr[j].(Int), item.(Int))
- }
- j++
- return true
- })
- if j != len(arr)-100 {
- b.Fatalf("expected: %v, got %v", len(arr)-100, j)
- }
- }
-}
-
-func BenchmarkDescendRange(b *testing.B) {
- arr := perm(benchmarkTreeSize)
- tr := New(*btreeDegree)
- for _, v := range arr {
- tr.ReplaceOrInsert(v)
- }
- sort.Sort(byInts(arr))
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- j := len(arr) - 100
- tr.DescendRange(arr[len(arr)-100], Int(100), func(item Item) bool {
- if item.(Int) != arr[j].(Int) {
- b.Fatalf("mismatch: expected: %v, got %v", arr[j].(Int), item.(Int))
- }
- j--
- return true
- })
- if j != 100 {
- b.Fatalf("expected: %v, got %v", len(arr)-100, j)
- }
- }
-}
-func BenchmarkAscendGreaterOrEqual(b *testing.B) {
- arr := perm(benchmarkTreeSize)
- tr := New(*btreeDegree)
- for _, v := range arr {
- tr.ReplaceOrInsert(v)
- }
- sort.Sort(byInts(arr))
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- j := 100
- k := 0
- tr.AscendGreaterOrEqual(Int(100), func(item Item) bool {
- if item.(Int) != arr[j].(Int) {
- b.Fatalf("mismatch: expected: %v, got %v", arr[j].(Int), item.(Int))
- }
- j++
- k++
- return true
- })
- if j != len(arr) {
- b.Fatalf("expected: %v, got %v", len(arr), j)
- }
- if k != len(arr)-100 {
- b.Fatalf("expected: %v, got %v", len(arr)-100, k)
- }
- }
-}
-func BenchmarkDescendLessOrEqual(b *testing.B) {
- arr := perm(benchmarkTreeSize)
- tr := New(*btreeDegree)
- for _, v := range arr {
- tr.ReplaceOrInsert(v)
- }
- sort.Sort(byInts(arr))
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- j := len(arr) - 100
- k := len(arr)
- tr.DescendLessOrEqual(arr[len(arr)-100], func(item Item) bool {
- if item.(Int) != arr[j].(Int) {
- b.Fatalf("mismatch: expected: %v, got %v", arr[j].(Int), item.(Int))
- }
- j--
- k--
- return true
- })
- if j != -1 {
- b.Fatalf("expected: %v, got %v", -1, j)
- }
- if k != 99 {
- b.Fatalf("expected: %v, got %v", 99, k)
- }
- }
-}
diff --git a/vendor/src/github.com/google/go-github/github/activity.go b/vendor/src/github.com/google/go-github/github/activity.go
deleted file mode 100644
index d719ebb..0000000
--- a/vendor/src/github.com/google/go-github/github/activity.go
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-// ActivityService handles communication with the activity related
-// methods of the GitHub API.
-//
-// GitHub API docs: http://developer.github.com/v3/activity/
-type ActivityService service
-
-// FeedLink represents a link to a related resource.
-type FeedLink struct {
- HRef *string `json:"href,omitempty"`
- Type *string `json:"type,omitempty"`
-}
-
-// Feeds represents timeline resources in Atom format.
-type Feeds struct {
- TimelineURL *string `json:"timeline_url,omitempty"`
- UserURL *string `json:"user_url,omitempty"`
- CurrentUserPublicURL *string `json:"current_user_public_url,omitempty"`
- CurrentUserURL *string `json:"current_user_url,omitempty"`
- CurrentUserActorURL *string `json:"current_user_actor_url,omitempty"`
- CurrentUserOrganizationURL *string `json:"current_user_organization_url,omitempty"`
- CurrentUserOrganizationURLs []string `json:"current_user_organization_urls,omitempty"`
- Links *struct {
- Timeline *FeedLink `json:"timeline,omitempty"`
- User *FeedLink `json:"user,omitempty"`
- CurrentUserPublic *FeedLink `json:"current_user_public,omitempty"`
- CurrentUser *FeedLink `json:"current_user,omitempty"`
- CurrentUserActor *FeedLink `json:"current_user_actor,omitempty"`
- CurrentUserOrganization *FeedLink `json:"current_user_organization,omitempty"`
- CurrentUserOrganizations []FeedLink `json:"current_user_organizations,omitempty"`
- } `json:"_links,omitempty"`
-}
-
-// ListFeeds lists all the feeds available to the authenticated user.
-//
-// GitHub provides several timeline resources in Atom format:
-// Timeline: The GitHub global public timeline
-// User: The public timeline for any user, using URI template
-// Current user public: The public timeline for the authenticated user
-// Current user: The private timeline for the authenticated user
-// Current user actor: The private timeline for activity created by the
-// authenticated user
-// Current user organizations: The private timeline for the organizations
-// the authenticated user is a member of.
-//
-// Note: Private feeds are only returned when authenticating via Basic Auth
-// since current feed URIs use the older, non revocable auth tokens.
-func (s *ActivityService) ListFeeds() (*Feeds, *Response, error) {
- req, err := s.client.NewRequest("GET", "feeds", nil)
- if err != nil {
- return nil, nil, err
- }
-
- f := &Feeds{}
- resp, err := s.client.Do(req, f)
- if err != nil {
- return nil, resp, err
- }
-
- return f, resp, nil
-}
diff --git a/vendor/src/github.com/google/go-github/github/activity_events.go b/vendor/src/github.com/google/go-github/github/activity_events.go
deleted file mode 100644
index a0e5f08..0000000
--- a/vendor/src/github.com/google/go-github/github/activity_events.go
+++ /dev/null
@@ -1,287 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "encoding/json"
- "fmt"
- "time"
-)
-
-// Event represents a GitHub event.
-type Event struct {
- Type *string `json:"type,omitempty"`
- Public *bool `json:"public"`
- RawPayload *json.RawMessage `json:"payload,omitempty"`
- Repo *Repository `json:"repo,omitempty"`
- Actor *User `json:"actor,omitempty"`
- Org *Organization `json:"org,omitempty"`
- CreatedAt *time.Time `json:"created_at,omitempty"`
- ID *string `json:"id,omitempty"`
-}
-
-func (e Event) String() string {
- return Stringify(e)
-}
-
-// Payload returns the parsed event payload. For recognized event types,
-// a value of the corresponding struct type will be returned.
-func (e *Event) Payload() (payload interface{}) {
- switch *e.Type {
- case "CommitCommentEvent":
- payload = &CommitCommentEvent{}
- case "CreateEvent":
- payload = &CreateEvent{}
- case "DeleteEvent":
- payload = &DeleteEvent{}
- case "DeploymentEvent":
- payload = &DeploymentEvent{}
- case "DeploymentStatusEvent":
- payload = &DeploymentStatusEvent{}
- case "ForkEvent":
- payload = &ForkEvent{}
- case "GollumEvent":
- payload = &GollumEvent{}
- case "IssueActivityEvent":
- payload = &IssueActivityEvent{}
- case "IssueCommentEvent":
- payload = &IssueCommentEvent{}
- case "IssuesEvent":
- payload = &IssuesEvent{}
- case "MemberEvent":
- payload = &MemberEvent{}
- case "MembershipEvent":
- payload = &MembershipEvent{}
- case "PageBuildEvent":
- payload = &PageBuildEvent{}
- case "PublicEvent":
- payload = &PublicEvent{}
- case "PullRequestEvent":
- payload = &PullRequestEvent{}
- case "PullRequestReviewCommentEvent":
- payload = &PullRequestReviewCommentEvent{}
- case "PushEvent":
- payload = &PushEvent{}
- case "ReleaseEvent":
- payload = &ReleaseEvent{}
- case "RepositoryEvent":
- payload = &RepositoryEvent{}
- case "StatusEvent":
- payload = &StatusEvent{}
- case "TeamAddEvent":
- payload = &TeamAddEvent{}
- case "WatchEvent":
- payload = &WatchEvent{}
- }
- if err := json.Unmarshal(*e.RawPayload, &payload); err != nil {
- panic(err.Error())
- }
- return payload
-}
-
-// ListEvents drinks from the firehose of all public events across GitHub.
-//
-// GitHub API docs: http://developer.github.com/v3/activity/events/#list-public-events
-func (s *ActivityService) ListEvents(opt *ListOptions) ([]*Event, *Response, error) {
- u, err := addOptions("events", opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- events := new([]*Event)
- resp, err := s.client.Do(req, events)
- if err != nil {
- return nil, resp, err
- }
-
- return *events, resp, err
-}
-
-// ListRepositoryEvents lists events for a repository.
-//
-// GitHub API docs: http://developer.github.com/v3/activity/events/#list-repository-events
-func (s *ActivityService) ListRepositoryEvents(owner, repo string, opt *ListOptions) ([]*Event, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/events", owner, repo)
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- events := new([]*Event)
- resp, err := s.client.Do(req, events)
- if err != nil {
- return nil, resp, err
- }
-
- return *events, resp, err
-}
-
-// ListIssueEventsForRepository lists issue events for a repository.
-//
-// GitHub API docs: http://developer.github.com/v3/activity/events/#list-issue-events-for-a-repository
-func (s *ActivityService) ListIssueEventsForRepository(owner, repo string, opt *ListOptions) ([]*Event, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/issues/events", owner, repo)
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- events := new([]*Event)
- resp, err := s.client.Do(req, events)
- if err != nil {
- return nil, resp, err
- }
-
- return *events, resp, err
-}
-
-// ListEventsForRepoNetwork lists public events for a network of repositories.
-//
-// GitHub API docs: http://developer.github.com/v3/activity/events/#list-public-events-for-a-network-of-repositories
-func (s *ActivityService) ListEventsForRepoNetwork(owner, repo string, opt *ListOptions) ([]*Event, *Response, error) {
- u := fmt.Sprintf("networks/%v/%v/events", owner, repo)
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- events := new([]*Event)
- resp, err := s.client.Do(req, events)
- if err != nil {
- return nil, resp, err
- }
-
- return *events, resp, err
-}
-
-// ListEventsForOrganization lists public events for an organization.
-//
-// GitHub API docs: http://developer.github.com/v3/activity/events/#list-public-events-for-an-organization
-func (s *ActivityService) ListEventsForOrganization(org string, opt *ListOptions) ([]*Event, *Response, error) {
- u := fmt.Sprintf("orgs/%v/events", org)
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- events := new([]*Event)
- resp, err := s.client.Do(req, events)
- if err != nil {
- return nil, resp, err
- }
-
- return *events, resp, err
-}
-
-// ListEventsPerformedByUser lists the events performed by a user. If publicOnly is
-// true, only public events will be returned.
-//
-// GitHub API docs: http://developer.github.com/v3/activity/events/#list-events-performed-by-a-user
-func (s *ActivityService) ListEventsPerformedByUser(user string, publicOnly bool, opt *ListOptions) ([]*Event, *Response, error) {
- var u string
- if publicOnly {
- u = fmt.Sprintf("users/%v/events/public", user)
- } else {
- u = fmt.Sprintf("users/%v/events", user)
- }
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- events := new([]*Event)
- resp, err := s.client.Do(req, events)
- if err != nil {
- return nil, resp, err
- }
-
- return *events, resp, err
-}
-
-// ListEventsReceivedByUser lists the events received by a user. If publicOnly is
-// true, only public events will be returned.
-//
-// GitHub API docs: http://developer.github.com/v3/activity/events/#list-events-that-a-user-has-received
-func (s *ActivityService) ListEventsReceivedByUser(user string, publicOnly bool, opt *ListOptions) ([]*Event, *Response, error) {
- var u string
- if publicOnly {
- u = fmt.Sprintf("users/%v/received_events/public", user)
- } else {
- u = fmt.Sprintf("users/%v/received_events", user)
- }
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- events := new([]*Event)
- resp, err := s.client.Do(req, events)
- if err != nil {
- return nil, resp, err
- }
-
- return *events, resp, err
-}
-
-// ListUserEventsForOrganization provides the user’s organization dashboard. You
-// must be authenticated as the user to view this.
-//
-// GitHub API docs: http://developer.github.com/v3/activity/events/#list-events-for-an-organization
-func (s *ActivityService) ListUserEventsForOrganization(org, user string, opt *ListOptions) ([]*Event, *Response, error) {
- u := fmt.Sprintf("users/%v/events/orgs/%v", user, org)
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- events := new([]*Event)
- resp, err := s.client.Do(req, events)
- if err != nil {
- return nil, resp, err
- }
-
- return *events, resp, err
-}
diff --git a/vendor/src/github.com/google/go-github/github/activity_events_test.go b/vendor/src/github.com/google/go-github/github/activity_events_test.go
deleted file mode 100644
index f8ffea7..0000000
--- a/vendor/src/github.com/google/go-github/github/activity_events_test.go
+++ /dev/null
@@ -1,305 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "encoding/json"
- "fmt"
- "net/http"
- "reflect"
- "testing"
-)
-
-func TestActivityService_ListEvents(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/events", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testFormValues(t, r, values{
- "page": "2",
- })
- fmt.Fprint(w, `[{"id":"1"},{"id":"2"}]`)
- })
-
- opt := &ListOptions{Page: 2}
- events, _, err := client.Activity.ListEvents(opt)
- if err != nil {
- t.Errorf("Activities.ListEvents returned error: %v", err)
- }
-
- want := []*Event{{ID: String("1")}, {ID: String("2")}}
- if !reflect.DeepEqual(events, want) {
- t.Errorf("Activities.ListEvents returned %+v, want %+v", events, want)
- }
-}
-
-func TestActivityService_ListRepositoryEvents(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/events", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testFormValues(t, r, values{
- "page": "2",
- })
- fmt.Fprint(w, `[{"id":"1"},{"id":"2"}]`)
- })
-
- opt := &ListOptions{Page: 2}
- events, _, err := client.Activity.ListRepositoryEvents("o", "r", opt)
- if err != nil {
- t.Errorf("Activities.ListRepositoryEvents returned error: %v", err)
- }
-
- want := []*Event{{ID: String("1")}, {ID: String("2")}}
- if !reflect.DeepEqual(events, want) {
- t.Errorf("Activities.ListRepositoryEvents returned %+v, want %+v", events, want)
- }
-}
-
-func TestActivityService_ListRepositoryEvents_invalidOwner(t *testing.T) {
- _, _, err := client.Activity.ListRepositoryEvents("%", "%", nil)
- testURLParseError(t, err)
-}
-
-func TestActivityService_ListIssueEventsForRepository(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/issues/events", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testFormValues(t, r, values{
- "page": "2",
- })
- fmt.Fprint(w, `[{"id":"1"},{"id":"2"}]`)
- })
-
- opt := &ListOptions{Page: 2}
- events, _, err := client.Activity.ListIssueEventsForRepository("o", "r", opt)
- if err != nil {
- t.Errorf("Activities.ListIssueEventsForRepository returned error: %v", err)
- }
-
- want := []*Event{{ID: String("1")}, {ID: String("2")}}
- if !reflect.DeepEqual(events, want) {
- t.Errorf("Activities.ListIssueEventsForRepository returned %+v, want %+v", events, want)
- }
-}
-
-func TestActivityService_ListIssueEventsForRepository_invalidOwner(t *testing.T) {
- _, _, err := client.Activity.ListIssueEventsForRepository("%", "%", nil)
- testURLParseError(t, err)
-}
-
-func TestActivityService_ListEventsForRepoNetwork(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/networks/o/r/events", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testFormValues(t, r, values{
- "page": "2",
- })
- fmt.Fprint(w, `[{"id":"1"},{"id":"2"}]`)
- })
-
- opt := &ListOptions{Page: 2}
- events, _, err := client.Activity.ListEventsForRepoNetwork("o", "r", opt)
- if err != nil {
- t.Errorf("Activities.ListEventsForRepoNetwork returned error: %v", err)
- }
-
- want := []*Event{{ID: String("1")}, {ID: String("2")}}
- if !reflect.DeepEqual(events, want) {
- t.Errorf("Activities.ListEventsForRepoNetwork returned %+v, want %+v", events, want)
- }
-}
-
-func TestActivityService_ListEventsForRepoNetwork_invalidOwner(t *testing.T) {
- _, _, err := client.Activity.ListEventsForRepoNetwork("%", "%", nil)
- testURLParseError(t, err)
-}
-
-func TestActivityService_ListEventsForOrganization(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/orgs/o/events", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testFormValues(t, r, values{
- "page": "2",
- })
- fmt.Fprint(w, `[{"id":"1"},{"id":"2"}]`)
- })
-
- opt := &ListOptions{Page: 2}
- events, _, err := client.Activity.ListEventsForOrganization("o", opt)
- if err != nil {
- t.Errorf("Activities.ListEventsForOrganization returned error: %v", err)
- }
-
- want := []*Event{{ID: String("1")}, {ID: String("2")}}
- if !reflect.DeepEqual(events, want) {
- t.Errorf("Activities.ListEventsForOrganization returned %+v, want %+v", events, want)
- }
-}
-
-func TestActivityService_ListEventsForOrganization_invalidOrg(t *testing.T) {
- _, _, err := client.Activity.ListEventsForOrganization("%", nil)
- testURLParseError(t, err)
-}
-
-func TestActivityService_ListEventsPerformedByUser_all(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/users/u/events", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testFormValues(t, r, values{
- "page": "2",
- })
- fmt.Fprint(w, `[{"id":"1"},{"id":"2"}]`)
- })
-
- opt := &ListOptions{Page: 2}
- events, _, err := client.Activity.ListEventsPerformedByUser("u", false, opt)
- if err != nil {
- t.Errorf("Events.ListPerformedByUser returned error: %v", err)
- }
-
- want := []*Event{{ID: String("1")}, {ID: String("2")}}
- if !reflect.DeepEqual(events, want) {
- t.Errorf("Events.ListPerformedByUser returned %+v, want %+v", events, want)
- }
-}
-
-func TestActivityService_ListEventsPerformedByUser_publicOnly(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/users/u/events/public", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- fmt.Fprint(w, `[{"id":"1"},{"id":"2"}]`)
- })
-
- events, _, err := client.Activity.ListEventsPerformedByUser("u", true, nil)
- if err != nil {
- t.Errorf("Events.ListPerformedByUser returned error: %v", err)
- }
-
- want := []*Event{{ID: String("1")}, {ID: String("2")}}
- if !reflect.DeepEqual(events, want) {
- t.Errorf("Events.ListPerformedByUser returned %+v, want %+v", events, want)
- }
-}
-
-func TestActivityService_ListEventsPerformedByUser_invalidUser(t *testing.T) {
- _, _, err := client.Activity.ListEventsPerformedByUser("%", false, nil)
- testURLParseError(t, err)
-}
-
-func TestActivityService_ListEventsReceivedByUser_all(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/users/u/received_events", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testFormValues(t, r, values{
- "page": "2",
- })
- fmt.Fprint(w, `[{"id":"1"},{"id":"2"}]`)
- })
-
- opt := &ListOptions{Page: 2}
- events, _, err := client.Activity.ListEventsReceivedByUser("u", false, opt)
- if err != nil {
- t.Errorf("Events.ListReceivedByUser returned error: %v", err)
- }
-
- want := []*Event{{ID: String("1")}, {ID: String("2")}}
- if !reflect.DeepEqual(events, want) {
- t.Errorf("Events.ListReceivedUser returned %+v, want %+v", events, want)
- }
-}
-
-func TestActivityService_ListEventsReceivedByUser_publicOnly(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/users/u/received_events/public", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- fmt.Fprint(w, `[{"id":"1"},{"id":"2"}]`)
- })
-
- events, _, err := client.Activity.ListEventsReceivedByUser("u", true, nil)
- if err != nil {
- t.Errorf("Events.ListReceivedByUser returned error: %v", err)
- }
-
- want := []*Event{{ID: String("1")}, {ID: String("2")}}
- if !reflect.DeepEqual(events, want) {
- t.Errorf("Events.ListReceivedByUser returned %+v, want %+v", events, want)
- }
-}
-
-func TestActivityService_ListEventsReceivedByUser_invalidUser(t *testing.T) {
- _, _, err := client.Activity.ListEventsReceivedByUser("%", false, nil)
- testURLParseError(t, err)
-}
-
-func TestActivityService_ListUserEventsForOrganization(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/users/u/events/orgs/o", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testFormValues(t, r, values{
- "page": "2",
- })
- fmt.Fprint(w, `[{"id":"1"},{"id":"2"}]`)
- })
-
- opt := &ListOptions{Page: 2}
- events, _, err := client.Activity.ListUserEventsForOrganization("o", "u", opt)
- if err != nil {
- t.Errorf("Activities.ListUserEventsForOrganization returned error: %v", err)
- }
-
- want := []*Event{{ID: String("1")}, {ID: String("2")}}
- if !reflect.DeepEqual(events, want) {
- t.Errorf("Activities.ListUserEventsForOrganization returned %+v, want %+v", events, want)
- }
-}
-
-func TestActivity_EventPayload_typed(t *testing.T) {
- raw := []byte(`{"type": "PushEvent","payload":{"push_id": 1}}`)
- var event *Event
- if err := json.Unmarshal(raw, &event); err != nil {
- t.Fatalf("Unmarshal Event returned error: %v", err)
- }
-
- want := &PushEvent{PushID: Int(1)}
- if !reflect.DeepEqual(event.Payload(), want) {
- t.Errorf("Event Payload returned %+v, want %+v", event.Payload(), want)
- }
-}
-
-// TestEvent_Payload_untyped checks that unrecognized events are parsed to an
-// interface{} value (instead of being discarded or throwing an error), for
-// forward compatibility with new event types.
-func TestActivity_EventPayload_untyped(t *testing.T) {
- raw := []byte(`{"type": "UnrecognizedEvent","payload":{"field": "val"}}`)
- var event *Event
- if err := json.Unmarshal(raw, &event); err != nil {
- t.Fatalf("Unmarshal Event returned error: %v", err)
- }
-
- want := map[string]interface{}{"field": "val"}
- if !reflect.DeepEqual(event.Payload(), want) {
- t.Errorf("Event Payload returned %+v, want %+v", event.Payload(), want)
- }
-}
diff --git a/vendor/src/github.com/google/go-github/github/activity_notifications.go b/vendor/src/github.com/google/go-github/github/activity_notifications.go
deleted file mode 100644
index b538a7b..0000000
--- a/vendor/src/github.com/google/go-github/github/activity_notifications.go
+++ /dev/null
@@ -1,222 +0,0 @@
-// Copyright 2014 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "fmt"
- "time"
-)
-
-// Notification identifies a GitHub notification for a user.
-type Notification struct {
- ID *string `json:"id,omitempty"`
- Repository *Repository `json:"repository,omitempty"`
- Subject *NotificationSubject `json:"subject,omitempty"`
-
- // Reason identifies the event that triggered the notification.
- //
- // GitHub API Docs: https://developer.github.com/v3/activity/notifications/#notification-reasons
- Reason *string `json:"reason,omitempty"`
-
- Unread *bool `json:"unread,omitempty"`
- UpdatedAt *time.Time `json:"updated_at,omitempty"`
- LastReadAt *time.Time `json:"last_read_at,omitempty"`
- URL *string `json:"url,omitempty"`
-}
-
-// NotificationSubject identifies the subject of a notification.
-type NotificationSubject struct {
- Title *string `json:"title,omitempty"`
- URL *string `json:"url,omitempty"`
- LatestCommentURL *string `json:"latest_comment_url,omitempty"`
- Type *string `json:"type,omitempty"`
-}
-
-// NotificationListOptions specifies the optional parameters to the
-// ActivityService.ListNotifications method.
-type NotificationListOptions struct {
- All bool `url:"all,omitempty"`
- Participating bool `url:"participating,omitempty"`
- Since time.Time `url:"since,omitempty"`
- Before time.Time `url:"before,omitempty"`
-
- ListOptions
-}
-
-// ListNotifications lists all notifications for the authenticated user.
-//
-// GitHub API Docs: https://developer.github.com/v3/activity/notifications/#list-your-notifications
-func (s *ActivityService) ListNotifications(opt *NotificationListOptions) ([]*Notification, *Response, error) {
- u := fmt.Sprintf("notifications")
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- var notifications []*Notification
- resp, err := s.client.Do(req, ¬ifications)
- if err != nil {
- return nil, resp, err
- }
-
- return notifications, resp, err
-}
-
-// ListRepositoryNotifications lists all notifications in a given repository
-// for the authenticated user.
-//
-// GitHub API Docs: https://developer.github.com/v3/activity/notifications/#list-your-notifications-in-a-repository
-func (s *ActivityService) ListRepositoryNotifications(owner, repo string, opt *NotificationListOptions) ([]*Notification, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/notifications", owner, repo)
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- var notifications []*Notification
- resp, err := s.client.Do(req, ¬ifications)
- if err != nil {
- return nil, resp, err
- }
-
- return notifications, resp, err
-}
-
-type markReadOptions struct {
- LastReadAt time.Time `json:"last_read_at,omitempty"`
-}
-
-// MarkNotificationsRead marks all notifications up to lastRead as read.
-//
-// GitHub API Docs: https://developer.github.com/v3/activity/notifications/#mark-as-read
-func (s *ActivityService) MarkNotificationsRead(lastRead time.Time) (*Response, error) {
- opts := &markReadOptions{
- LastReadAt: lastRead,
- }
- req, err := s.client.NewRequest("PUT", "notifications", opts)
- if err != nil {
- return nil, err
- }
-
- return s.client.Do(req, nil)
-}
-
-// MarkRepositoryNotificationsRead marks all notifications up to lastRead in
-// the specified repository as read.
-//
-// GitHub API Docs: https://developer.github.com/v3/activity/notifications/#mark-notifications-as-read-in-a-repository
-func (s *ActivityService) MarkRepositoryNotificationsRead(owner, repo string, lastRead time.Time) (*Response, error) {
- opts := &markReadOptions{
- LastReadAt: lastRead,
- }
- u := fmt.Sprintf("repos/%v/%v/notifications", owner, repo)
- req, err := s.client.NewRequest("PUT", u, opts)
- if err != nil {
- return nil, err
- }
-
- return s.client.Do(req, nil)
-}
-
-// GetThread gets the specified notification thread.
-//
-// GitHub API Docs: https://developer.github.com/v3/activity/notifications/#view-a-single-thread
-func (s *ActivityService) GetThread(id string) (*Notification, *Response, error) {
- u := fmt.Sprintf("notifications/threads/%v", id)
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- notification := new(Notification)
- resp, err := s.client.Do(req, notification)
- if err != nil {
- return nil, resp, err
- }
-
- return notification, resp, err
-}
-
-// MarkThreadRead marks the specified thread as read.
-//
-// GitHub API Docs: https://developer.github.com/v3/activity/notifications/#mark-a-thread-as-read
-func (s *ActivityService) MarkThreadRead(id string) (*Response, error) {
- u := fmt.Sprintf("notifications/threads/%v", id)
-
- req, err := s.client.NewRequest("PATCH", u, nil)
- if err != nil {
- return nil, err
- }
-
- return s.client.Do(req, nil)
-}
-
-// GetThreadSubscription checks to see if the authenticated user is subscribed
-// to a thread.
-//
-// GitHub API Docs: https://developer.github.com/v3/activity/notifications/#get-a-thread-subscription
-func (s *ActivityService) GetThreadSubscription(id string) (*Subscription, *Response, error) {
- u := fmt.Sprintf("notifications/threads/%v/subscription", id)
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- sub := new(Subscription)
- resp, err := s.client.Do(req, sub)
- if err != nil {
- return nil, resp, err
- }
-
- return sub, resp, err
-}
-
-// SetThreadSubscription sets the subscription for the specified thread for the
-// authenticated user.
-//
-// GitHub API Docs: https://developer.github.com/v3/activity/notifications/#set-a-thread-subscription
-func (s *ActivityService) SetThreadSubscription(id string, subscription *Subscription) (*Subscription, *Response, error) {
- u := fmt.Sprintf("notifications/threads/%v/subscription", id)
-
- req, err := s.client.NewRequest("PUT", u, subscription)
- if err != nil {
- return nil, nil, err
- }
-
- sub := new(Subscription)
- resp, err := s.client.Do(req, sub)
- if err != nil {
- return nil, resp, err
- }
-
- return sub, resp, err
-}
-
-// DeleteThreadSubscription deletes the subscription for the specified thread
-// for the authenticated user.
-//
-// GitHub API Docs: https://developer.github.com/v3/activity/notifications/#delete-a-thread-subscription
-func (s *ActivityService) DeleteThreadSubscription(id string) (*Response, error) {
- u := fmt.Sprintf("notifications/threads/%v/subscription", id)
- req, err := s.client.NewRequest("DELETE", u, nil)
- if err != nil {
- return nil, err
- }
-
- return s.client.Do(req, nil)
-}
diff --git a/vendor/src/github.com/google/go-github/github/activity_notifications_test.go b/vendor/src/github.com/google/go-github/github/activity_notifications_test.go
deleted file mode 100644
index 8f6a581..0000000
--- a/vendor/src/github.com/google/go-github/github/activity_notifications_test.go
+++ /dev/null
@@ -1,203 +0,0 @@
-// Copyright 2014 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "encoding/json"
- "fmt"
- "net/http"
- "reflect"
- "testing"
- "time"
-)
-
-func TestActivityService_ListNotification(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/notifications", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testFormValues(t, r, values{
- "all": "true",
- "participating": "true",
- "since": "2006-01-02T15:04:05Z",
- "before": "2007-03-04T15:04:05Z",
- })
-
- fmt.Fprint(w, `[{"id":"1", "subject":{"title":"t"}}]`)
- })
-
- opt := &NotificationListOptions{
- All: true,
- Participating: true,
- Since: time.Date(2006, 01, 02, 15, 04, 05, 0, time.UTC),
- Before: time.Date(2007, 03, 04, 15, 04, 05, 0, time.UTC),
- }
- notifications, _, err := client.Activity.ListNotifications(opt)
- if err != nil {
- t.Errorf("Activity.ListNotifications returned error: %v", err)
- }
-
- want := []*Notification{{ID: String("1"), Subject: &NotificationSubject{Title: String("t")}}}
- if !reflect.DeepEqual(notifications, want) {
- t.Errorf("Activity.ListNotifications returned %+v, want %+v", notifications, want)
- }
-}
-
-func TestActivityService_ListRepositoryNotification(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/notifications", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- fmt.Fprint(w, `[{"id":"1"}]`)
- })
-
- notifications, _, err := client.Activity.ListRepositoryNotifications("o", "r", nil)
- if err != nil {
- t.Errorf("Activity.ListRepositoryNotifications returned error: %v", err)
- }
-
- want := []*Notification{{ID: String("1")}}
- if !reflect.DeepEqual(notifications, want) {
- t.Errorf("Activity.ListRepositoryNotifications returned %+v, want %+v", notifications, want)
- }
-}
-
-func TestActivityService_MarkNotificationsRead(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/notifications", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "PUT")
- testHeader(t, r, "Content-Type", "application/json")
- testBody(t, r, `{"last_read_at":"2006-01-02T15:04:05Z"}`+"\n")
-
- w.WriteHeader(http.StatusResetContent)
- })
-
- _, err := client.Activity.MarkNotificationsRead(time.Date(2006, 01, 02, 15, 04, 05, 0, time.UTC))
- if err != nil {
- t.Errorf("Activity.MarkNotificationsRead returned error: %v", err)
- }
-}
-
-func TestActivityService_MarkRepositoryNotificationsRead(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/notifications", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "PUT")
- testHeader(t, r, "Content-Type", "application/json")
- testBody(t, r, `{"last_read_at":"2006-01-02T15:04:05Z"}`+"\n")
-
- w.WriteHeader(http.StatusResetContent)
- })
-
- _, err := client.Activity.MarkRepositoryNotificationsRead("o", "r", time.Date(2006, 01, 02, 15, 04, 05, 0, time.UTC))
- if err != nil {
- t.Errorf("Activity.MarkRepositoryNotificationsRead returned error: %v", err)
- }
-}
-
-func TestActivityService_GetThread(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/notifications/threads/1", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- fmt.Fprint(w, `{"id":"1"}`)
- })
-
- notification, _, err := client.Activity.GetThread("1")
- if err != nil {
- t.Errorf("Activity.GetThread returned error: %v", err)
- }
-
- want := &Notification{ID: String("1")}
- if !reflect.DeepEqual(notification, want) {
- t.Errorf("Activity.GetThread returned %+v, want %+v", notification, want)
- }
-}
-
-func TestActivityService_MarkThreadRead(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/notifications/threads/1", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "PATCH")
- w.WriteHeader(http.StatusResetContent)
- })
-
- _, err := client.Activity.MarkThreadRead("1")
- if err != nil {
- t.Errorf("Activity.MarkThreadRead returned error: %v", err)
- }
-}
-
-func TestActivityService_GetThreadSubscription(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/notifications/threads/1/subscription", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- fmt.Fprint(w, `{"subscribed":true}`)
- })
-
- sub, _, err := client.Activity.GetThreadSubscription("1")
- if err != nil {
- t.Errorf("Activity.GetThreadSubscription returned error: %v", err)
- }
-
- want := &Subscription{Subscribed: Bool(true)}
- if !reflect.DeepEqual(sub, want) {
- t.Errorf("Activity.GetThreadSubscription returned %+v, want %+v", sub, want)
- }
-}
-
-func TestActivityService_SetThreadSubscription(t *testing.T) {
- setup()
- defer teardown()
-
- input := &Subscription{Subscribed: Bool(true)}
-
- mux.HandleFunc("/notifications/threads/1/subscription", func(w http.ResponseWriter, r *http.Request) {
- v := new(Subscription)
- json.NewDecoder(r.Body).Decode(v)
-
- testMethod(t, r, "PUT")
- if !reflect.DeepEqual(v, input) {
- t.Errorf("Request body = %+v, want %+v", v, input)
- }
-
- fmt.Fprint(w, `{"ignored":true}`)
- })
-
- sub, _, err := client.Activity.SetThreadSubscription("1", input)
- if err != nil {
- t.Errorf("Activity.SetThreadSubscription returned error: %v", err)
- }
-
- want := &Subscription{Ignored: Bool(true)}
- if !reflect.DeepEqual(sub, want) {
- t.Errorf("Activity.SetThreadSubscription returned %+v, want %+v", sub, want)
- }
-}
-
-func TestActivityService_DeleteThreadSubscription(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/notifications/threads/1/subscription", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "DELETE")
- w.WriteHeader(http.StatusNoContent)
- })
-
- _, err := client.Activity.DeleteThreadSubscription("1")
- if err != nil {
- t.Errorf("Activity.DeleteThreadSubscription returned error: %v", err)
- }
-}
diff --git a/vendor/src/github.com/google/go-github/github/activity_star.go b/vendor/src/github.com/google/go-github/github/activity_star.go
deleted file mode 100644
index 5df6814..0000000
--- a/vendor/src/github.com/google/go-github/github/activity_star.go
+++ /dev/null
@@ -1,132 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import "fmt"
-
-// StarredRepository is returned by ListStarred.
-type StarredRepository struct {
- StarredAt *Timestamp `json:"starred_at,omitempty"`
- Repository *Repository `json:"repo,omitempty"`
-}
-
-// Stargazer represents a user that has starred a repository.
-type Stargazer struct {
- StarredAt *Timestamp `json:"starred_at,omitempty"`
- User *User `json:"user,omitempty"`
-}
-
-// ListStargazers lists people who have starred the specified repo.
-//
-// GitHub API Docs: https://developer.github.com/v3/activity/starring/#list-stargazers
-func (s *ActivityService) ListStargazers(owner, repo string, opt *ListOptions) ([]*Stargazer, *Response, error) {
- u := fmt.Sprintf("repos/%s/%s/stargazers", owner, repo)
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches
- req.Header.Set("Accept", mediaTypeStarringPreview)
-
- stargazers := new([]*Stargazer)
- resp, err := s.client.Do(req, stargazers)
- if err != nil {
- return nil, resp, err
- }
-
- return *stargazers, resp, err
-}
-
-// ActivityListStarredOptions specifies the optional parameters to the
-// ActivityService.ListStarred method.
-type ActivityListStarredOptions struct {
- // How to sort the repository list. Possible values are: created, updated,
- // pushed, full_name. Default is "full_name".
- Sort string `url:"sort,omitempty"`
-
- // Direction in which to sort repositories. Possible values are: asc, desc.
- // Default is "asc" when sort is "full_name", otherwise default is "desc".
- Direction string `url:"direction,omitempty"`
-
- ListOptions
-}
-
-// ListStarred lists all the repos starred by a user. Passing the empty string
-// will list the starred repositories for the authenticated user.
-//
-// GitHub API docs: http://developer.github.com/v3/activity/starring/#list-repositories-being-starred
-func (s *ActivityService) ListStarred(user string, opt *ActivityListStarredOptions) ([]*StarredRepository, *Response, error) {
- var u string
- if user != "" {
- u = fmt.Sprintf("users/%v/starred", user)
- } else {
- u = "user/starred"
- }
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches
- req.Header.Set("Accept", mediaTypeStarringPreview)
-
- repos := new([]*StarredRepository)
- resp, err := s.client.Do(req, repos)
- if err != nil {
- return nil, resp, err
- }
-
- return *repos, resp, err
-}
-
-// IsStarred checks if a repository is starred by authenticated user.
-//
-// GitHub API docs: https://developer.github.com/v3/activity/starring/#check-if-you-are-starring-a-repository
-func (s *ActivityService) IsStarred(owner, repo string) (bool, *Response, error) {
- u := fmt.Sprintf("user/starred/%v/%v", owner, repo)
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return false, nil, err
- }
- resp, err := s.client.Do(req, nil)
- starred, err := parseBoolResponse(err)
- return starred, resp, err
-}
-
-// Star a repository as the authenticated user.
-//
-// GitHub API docs: https://developer.github.com/v3/activity/starring/#star-a-repository
-func (s *ActivityService) Star(owner, repo string) (*Response, error) {
- u := fmt.Sprintf("user/starred/%v/%v", owner, repo)
- req, err := s.client.NewRequest("PUT", u, nil)
- if err != nil {
- return nil, err
- }
- return s.client.Do(req, nil)
-}
-
-// Unstar a repository as the authenticated user.
-//
-// GitHub API docs: https://developer.github.com/v3/activity/starring/#unstar-a-repository
-func (s *ActivityService) Unstar(owner, repo string) (*Response, error) {
- u := fmt.Sprintf("user/starred/%v/%v", owner, repo)
- req, err := s.client.NewRequest("DELETE", u, nil)
- if err != nil {
- return nil, err
- }
- return s.client.Do(req, nil)
-}
diff --git a/vendor/src/github.com/google/go-github/github/activity_star_test.go b/vendor/src/github.com/google/go-github/github/activity_star_test.go
deleted file mode 100644
index 4c0767a..0000000
--- a/vendor/src/github.com/google/go-github/github/activity_star_test.go
+++ /dev/null
@@ -1,171 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "fmt"
- "net/http"
- "reflect"
- "testing"
- "time"
-)
-
-func TestActivityService_ListStargazers(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/stargazers", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testHeader(t, r, "Accept", mediaTypeStarringPreview)
- testFormValues(t, r, values{
- "page": "2",
- })
-
- fmt.Fprint(w, `[{"starred_at":"2002-02-10T15:30:00Z","user":{"id":1}}]`)
- })
-
- stargazers, _, err := client.Activity.ListStargazers("o", "r", &ListOptions{Page: 2})
- if err != nil {
- t.Errorf("Activity.ListStargazers returned error: %v", err)
- }
-
- want := []*Stargazer{{StarredAt: &Timestamp{time.Date(2002, time.February, 10, 15, 30, 0, 0, time.UTC)}, User: &User{ID: Int(1)}}}
- if !reflect.DeepEqual(stargazers, want) {
- t.Errorf("Activity.ListStargazers returned %+v, want %+v", stargazers, want)
- }
-}
-
-func TestActivityService_ListStarred_authenticatedUser(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/user/starred", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testHeader(t, r, "Accept", mediaTypeStarringPreview)
- fmt.Fprint(w, `[{"starred_at":"2002-02-10T15:30:00Z","repo":{"id":1}}]`)
- })
-
- repos, _, err := client.Activity.ListStarred("", nil)
- if err != nil {
- t.Errorf("Activity.ListStarred returned error: %v", err)
- }
-
- want := []*StarredRepository{{StarredAt: &Timestamp{time.Date(2002, time.February, 10, 15, 30, 0, 0, time.UTC)}, Repository: &Repository{ID: Int(1)}}}
- if !reflect.DeepEqual(repos, want) {
- t.Errorf("Activity.ListStarred returned %+v, want %+v", repos, want)
- }
-}
-
-func TestActivityService_ListStarred_specifiedUser(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/users/u/starred", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testHeader(t, r, "Accept", mediaTypeStarringPreview)
- testFormValues(t, r, values{
- "sort": "created",
- "direction": "asc",
- "page": "2",
- })
- fmt.Fprint(w, `[{"starred_at":"2002-02-10T15:30:00Z","repo":{"id":2}}]`)
- })
-
- opt := &ActivityListStarredOptions{"created", "asc", ListOptions{Page: 2}}
- repos, _, err := client.Activity.ListStarred("u", opt)
- if err != nil {
- t.Errorf("Activity.ListStarred returned error: %v", err)
- }
-
- want := []*StarredRepository{{StarredAt: &Timestamp{time.Date(2002, time.February, 10, 15, 30, 0, 0, time.UTC)}, Repository: &Repository{ID: Int(2)}}}
- if !reflect.DeepEqual(repos, want) {
- t.Errorf("Activity.ListStarred returned %+v, want %+v", repos, want)
- }
-}
-
-func TestActivityService_ListStarred_invalidUser(t *testing.T) {
- _, _, err := client.Activity.ListStarred("%", nil)
- testURLParseError(t, err)
-}
-
-func TestActivityService_IsStarred_hasStar(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/user/starred/o/r", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- w.WriteHeader(http.StatusNoContent)
- })
-
- star, _, err := client.Activity.IsStarred("o", "r")
- if err != nil {
- t.Errorf("Activity.IsStarred returned error: %v", err)
- }
- if want := true; star != want {
- t.Errorf("Activity.IsStarred returned %+v, want %+v", star, want)
- }
-}
-
-func TestActivityService_IsStarred_noStar(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/user/starred/o/r", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- w.WriteHeader(http.StatusNotFound)
- })
-
- star, _, err := client.Activity.IsStarred("o", "r")
- if err != nil {
- t.Errorf("Activity.IsStarred returned error: %v", err)
- }
- if want := false; star != want {
- t.Errorf("Activity.IsStarred returned %+v, want %+v", star, want)
- }
-}
-
-func TestActivityService_IsStarred_invalidID(t *testing.T) {
- _, _, err := client.Activity.IsStarred("%", "%")
- testURLParseError(t, err)
-}
-
-func TestActivityService_Star(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/user/starred/o/r", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "PUT")
- })
-
- _, err := client.Activity.Star("o", "r")
- if err != nil {
- t.Errorf("Activity.Star returned error: %v", err)
- }
-}
-
-func TestActivityService_Star_invalidID(t *testing.T) {
- _, err := client.Activity.Star("%", "%")
- testURLParseError(t, err)
-}
-
-func TestActivityService_Unstar(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/user/starred/o/r", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "DELETE")
- })
-
- _, err := client.Activity.Unstar("o", "r")
- if err != nil {
- t.Errorf("Activity.Unstar returned error: %v", err)
- }
-}
-
-func TestActivityService_Unstar_invalidID(t *testing.T) {
- _, err := client.Activity.Unstar("%", "%")
- testURLParseError(t, err)
-}
diff --git a/vendor/src/github.com/google/go-github/github/activity_test.go b/vendor/src/github.com/google/go-github/github/activity_test.go
deleted file mode 100644
index dc289e9..0000000
--- a/vendor/src/github.com/google/go-github/github/activity_test.go
+++ /dev/null
@@ -1,128 +0,0 @@
-// Copyright 2016 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "net/http"
- "reflect"
- "testing"
-)
-
-func TestActivityService_List(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/feeds", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
-
- w.WriteHeader(http.StatusOK)
- w.Write(feedsJSON)
- })
-
- got, _, err := client.Activity.ListFeeds()
- if err != nil {
- t.Errorf("Activity.ListFeeds returned error: %v", err)
- }
- if want := wantFeeds; !reflect.DeepEqual(got, want) {
- t.Errorf("Activity.ListFeeds = %+v, want %+v", got, want)
- }
-}
-
-var feedsJSON = []byte(`{
- "timeline_url": "https://github.com/timeline",
- "user_url": "https://github.com/{user}",
- "current_user_public_url": "https://github.com/defunkt",
- "current_user_url": "https://github.com/defunkt.private?token=abc123",
- "current_user_actor_url": "https://github.com/defunkt.private.actor?token=abc123",
- "current_user_organization_url": "",
- "current_user_organization_urls": [
- "https://github.com/organizations/github/defunkt.private.atom?token=abc123"
- ],
- "_links": {
- "timeline": {
- "href": "https://github.com/timeline",
- "type": "application/atom+xml"
- },
- "user": {
- "href": "https://github.com/{user}",
- "type": "application/atom+xml"
- },
- "current_user_public": {
- "href": "https://github.com/defunkt",
- "type": "application/atom+xml"
- },
- "current_user": {
- "href": "https://github.com/defunkt.private?token=abc123",
- "type": "application/atom+xml"
- },
- "current_user_actor": {
- "href": "https://github.com/defunkt.private.actor?token=abc123",
- "type": "application/atom+xml"
- },
- "current_user_organization": {
- "href": "",
- "type": ""
- },
- "current_user_organizations": [
- {
- "href": "https://github.com/organizations/github/defunkt.private.atom?token=abc123",
- "type": "application/atom+xml"
- }
- ]
- }
-}`)
-
-var wantFeeds = &Feeds{
- TimelineURL: String("https://github.com/timeline"),
- UserURL: String("https://github.com/{user}"),
- CurrentUserPublicURL: String("https://github.com/defunkt"),
- CurrentUserURL: String("https://github.com/defunkt.private?token=abc123"),
- CurrentUserActorURL: String("https://github.com/defunkt.private.actor?token=abc123"),
- CurrentUserOrganizationURL: String(""),
- CurrentUserOrganizationURLs: []string{
- "https://github.com/organizations/github/defunkt.private.atom?token=abc123",
- },
- Links: &struct {
- Timeline *FeedLink `json:"timeline,omitempty"`
- User *FeedLink `json:"user,omitempty"`
- CurrentUserPublic *FeedLink `json:"current_user_public,omitempty"`
- CurrentUser *FeedLink `json:"current_user,omitempty"`
- CurrentUserActor *FeedLink `json:"current_user_actor,omitempty"`
- CurrentUserOrganization *FeedLink `json:"current_user_organization,omitempty"`
- CurrentUserOrganizations []FeedLink `json:"current_user_organizations,omitempty"`
- }{
- Timeline: &FeedLink{
- HRef: String("https://github.com/timeline"),
- Type: String("application/atom+xml"),
- },
- User: &FeedLink{
- HRef: String("https://github.com/{user}"),
- Type: String("application/atom+xml"),
- },
- CurrentUserPublic: &FeedLink{
- HRef: String("https://github.com/defunkt"),
- Type: String("application/atom+xml"),
- },
- CurrentUser: &FeedLink{
- HRef: String("https://github.com/defunkt.private?token=abc123"),
- Type: String("application/atom+xml"),
- },
- CurrentUserActor: &FeedLink{
- HRef: String("https://github.com/defunkt.private.actor?token=abc123"),
- Type: String("application/atom+xml"),
- },
- CurrentUserOrganization: &FeedLink{
- HRef: String(""),
- Type: String(""),
- },
- CurrentUserOrganizations: []FeedLink{
- {
- HRef: String("https://github.com/organizations/github/defunkt.private.atom?token=abc123"),
- Type: String("application/atom+xml"),
- },
- },
- },
-}
diff --git a/vendor/src/github.com/google/go-github/github/activity_watching.go b/vendor/src/github.com/google/go-github/github/activity_watching.go
deleted file mode 100644
index 6bf91dd..0000000
--- a/vendor/src/github.com/google/go-github/github/activity_watching.go
+++ /dev/null
@@ -1,136 +0,0 @@
-// Copyright 2014 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import "fmt"
-
-// Subscription identifies a repository or thread subscription.
-type Subscription struct {
- Subscribed *bool `json:"subscribed,omitempty"`
- Ignored *bool `json:"ignored,omitempty"`
- Reason *string `json:"reason,omitempty"`
- CreatedAt *Timestamp `json:"created_at,omitempty"`
- URL *string `json:"url,omitempty"`
-
- // only populated for repository subscriptions
- RepositoryURL *string `json:"repository_url,omitempty"`
-
- // only populated for thread subscriptions
- ThreadURL *string `json:"thread_url,omitempty"`
-}
-
-// ListWatchers lists watchers of a particular repo.
-//
-// GitHub API Docs: http://developer.github.com/v3/activity/watching/#list-watchers
-func (s *ActivityService) ListWatchers(owner, repo string, opt *ListOptions) ([]*User, *Response, error) {
- u := fmt.Sprintf("repos/%s/%s/subscribers", owner, repo)
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- watchers := new([]*User)
- resp, err := s.client.Do(req, watchers)
- if err != nil {
- return nil, resp, err
- }
-
- return *watchers, resp, err
-}
-
-// ListWatched lists the repositories the specified user is watching. Passing
-// the empty string will fetch watched repos for the authenticated user.
-//
-// GitHub API Docs: https://developer.github.com/v3/activity/watching/#list-repositories-being-watched
-func (s *ActivityService) ListWatched(user string, opt *ListOptions) ([]*Repository, *Response, error) {
- var u string
- if user != "" {
- u = fmt.Sprintf("users/%v/subscriptions", user)
- } else {
- u = "user/subscriptions"
- }
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- watched := new([]*Repository)
- resp, err := s.client.Do(req, watched)
- if err != nil {
- return nil, resp, err
- }
-
- return *watched, resp, err
-}
-
-// GetRepositorySubscription returns the subscription for the specified
-// repository for the authenticated user. If the authenticated user is not
-// watching the repository, a nil Subscription is returned.
-//
-// GitHub API Docs: https://developer.github.com/v3/activity/watching/#get-a-repository-subscription
-func (s *ActivityService) GetRepositorySubscription(owner, repo string) (*Subscription, *Response, error) {
- u := fmt.Sprintf("repos/%s/%s/subscription", owner, repo)
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- sub := new(Subscription)
- resp, err := s.client.Do(req, sub)
- if err != nil {
- // if it's just a 404, don't return that as an error
- _, err = parseBoolResponse(err)
- return nil, resp, err
- }
-
- return sub, resp, err
-}
-
-// SetRepositorySubscription sets the subscription for the specified repository
-// for the authenticated user.
-//
-// GitHub API Docs: https://developer.github.com/v3/activity/watching/#set-a-repository-subscription
-func (s *ActivityService) SetRepositorySubscription(owner, repo string, subscription *Subscription) (*Subscription, *Response, error) {
- u := fmt.Sprintf("repos/%s/%s/subscription", owner, repo)
-
- req, err := s.client.NewRequest("PUT", u, subscription)
- if err != nil {
- return nil, nil, err
- }
-
- sub := new(Subscription)
- resp, err := s.client.Do(req, sub)
- if err != nil {
- return nil, resp, err
- }
-
- return sub, resp, err
-}
-
-// DeleteRepositorySubscription deletes the subscription for the specified
-// repository for the authenticated user.
-//
-// GitHub API Docs: https://developer.github.com/v3/activity/watching/#delete-a-repository-subscription
-func (s *ActivityService) DeleteRepositorySubscription(owner, repo string) (*Response, error) {
- u := fmt.Sprintf("repos/%s/%s/subscription", owner, repo)
- req, err := s.client.NewRequest("DELETE", u, nil)
- if err != nil {
- return nil, err
- }
-
- return s.client.Do(req, nil)
-}
diff --git a/vendor/src/github.com/google/go-github/github/activity_watching_test.go b/vendor/src/github.com/google/go-github/github/activity_watching_test.go
deleted file mode 100644
index 8989fd3..0000000
--- a/vendor/src/github.com/google/go-github/github/activity_watching_test.go
+++ /dev/null
@@ -1,183 +0,0 @@
-// Copyright 2014 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "encoding/json"
- "fmt"
- "net/http"
- "reflect"
- "testing"
-)
-
-func TestActivityService_ListWatchers(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/subscribers", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testFormValues(t, r, values{
- "page": "2",
- })
-
- fmt.Fprint(w, `[{"id":1}]`)
- })
-
- watchers, _, err := client.Activity.ListWatchers("o", "r", &ListOptions{Page: 2})
- if err != nil {
- t.Errorf("Activity.ListWatchers returned error: %v", err)
- }
-
- want := []*User{{ID: Int(1)}}
- if !reflect.DeepEqual(watchers, want) {
- t.Errorf("Activity.ListWatchers returned %+v, want %+v", watchers, want)
- }
-}
-
-func TestActivityService_ListWatched_authenticatedUser(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/user/subscriptions", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testFormValues(t, r, values{
- "page": "2",
- })
- fmt.Fprint(w, `[{"id":1}]`)
- })
-
- watched, _, err := client.Activity.ListWatched("", &ListOptions{Page: 2})
- if err != nil {
- t.Errorf("Activity.ListWatched returned error: %v", err)
- }
-
- want := []*Repository{{ID: Int(1)}}
- if !reflect.DeepEqual(watched, want) {
- t.Errorf("Activity.ListWatched returned %+v, want %+v", watched, want)
- }
-}
-
-func TestActivityService_ListWatched_specifiedUser(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/users/u/subscriptions", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testFormValues(t, r, values{
- "page": "2",
- })
- fmt.Fprint(w, `[{"id":1}]`)
- })
-
- watched, _, err := client.Activity.ListWatched("u", &ListOptions{Page: 2})
- if err != nil {
- t.Errorf("Activity.ListWatched returned error: %v", err)
- }
-
- want := []*Repository{{ID: Int(1)}}
- if !reflect.DeepEqual(watched, want) {
- t.Errorf("Activity.ListWatched returned %+v, want %+v", watched, want)
- }
-}
-
-func TestActivityService_GetRepositorySubscription_true(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/subscription", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- fmt.Fprint(w, `{"subscribed":true}`)
- })
-
- sub, _, err := client.Activity.GetRepositorySubscription("o", "r")
- if err != nil {
- t.Errorf("Activity.GetRepositorySubscription returned error: %v", err)
- }
-
- want := &Subscription{Subscribed: Bool(true)}
- if !reflect.DeepEqual(sub, want) {
- t.Errorf("Activity.GetRepositorySubscription returned %+v, want %+v", sub, want)
- }
-}
-
-func TestActivityService_GetRepositorySubscription_false(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/subscription", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- w.WriteHeader(http.StatusNotFound)
- })
-
- sub, _, err := client.Activity.GetRepositorySubscription("o", "r")
- if err != nil {
- t.Errorf("Activity.GetRepositorySubscription returned error: %v", err)
- }
-
- var want *Subscription
- if !reflect.DeepEqual(sub, want) {
- t.Errorf("Activity.GetRepositorySubscription returned %+v, want %+v", sub, want)
- }
-}
-
-func TestActivityService_GetRepositorySubscription_error(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/subscription", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- w.WriteHeader(http.StatusBadRequest)
- })
-
- _, _, err := client.Activity.GetRepositorySubscription("o", "r")
- if err == nil {
- t.Errorf("Expected HTTP 400 response")
- }
-}
-
-func TestActivityService_SetRepositorySubscription(t *testing.T) {
- setup()
- defer teardown()
-
- input := &Subscription{Subscribed: Bool(true)}
-
- mux.HandleFunc("/repos/o/r/subscription", func(w http.ResponseWriter, r *http.Request) {
- v := new(Subscription)
- json.NewDecoder(r.Body).Decode(v)
-
- testMethod(t, r, "PUT")
- if !reflect.DeepEqual(v, input) {
- t.Errorf("Request body = %+v, want %+v", v, input)
- }
-
- fmt.Fprint(w, `{"ignored":true}`)
- })
-
- sub, _, err := client.Activity.SetRepositorySubscription("o", "r", input)
- if err != nil {
- t.Errorf("Activity.SetRepositorySubscription returned error: %v", err)
- }
-
- want := &Subscription{Ignored: Bool(true)}
- if !reflect.DeepEqual(sub, want) {
- t.Errorf("Activity.SetRepositorySubscription returned %+v, want %+v", sub, want)
- }
-}
-
-func TestActivityService_DeleteRepositorySubscription(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/subscription", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "DELETE")
- w.WriteHeader(http.StatusNoContent)
- })
-
- _, err := client.Activity.DeleteRepositorySubscription("o", "r")
- if err != nil {
- t.Errorf("Activity.DeleteRepositorySubscription returned error: %v", err)
- }
-}
diff --git a/vendor/src/github.com/google/go-github/github/authorizations.go b/vendor/src/github.com/google/go-github/github/authorizations.go
deleted file mode 100644
index 58fcc4e..0000000
--- a/vendor/src/github.com/google/go-github/github/authorizations.go
+++ /dev/null
@@ -1,436 +0,0 @@
-// Copyright 2015 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import "fmt"
-
-// Scope models a GitHub authorization scope.
-//
-// GitHub API docs:https://developer.github.com/v3/oauth/#scopes
-type Scope string
-
-// This is the set of scopes for GitHub API V3
-const (
- ScopeNone Scope = "(no scope)" // REVISIT: is this actually returned, or just a documentation artifact?
- ScopeUser Scope = "user"
- ScopeUserEmail Scope = "user:email"
- ScopeUserFollow Scope = "user:follow"
- ScopePublicRepo Scope = "public_repo"
- ScopeRepo Scope = "repo"
- ScopeRepoDeployment Scope = "repo_deployment"
- ScopeRepoStatus Scope = "repo:status"
- ScopeDeleteRepo Scope = "delete_repo"
- ScopeNotifications Scope = "notifications"
- ScopeGist Scope = "gist"
- ScopeReadRepoHook Scope = "read:repo_hook"
- ScopeWriteRepoHook Scope = "write:repo_hook"
- ScopeAdminRepoHook Scope = "admin:repo_hook"
- ScopeAdminOrgHook Scope = "admin:org_hook"
- ScopeReadOrg Scope = "read:org"
- ScopeWriteOrg Scope = "write:org"
- ScopeAdminOrg Scope = "admin:org"
- ScopeReadPublicKey Scope = "read:public_key"
- ScopeWritePublicKey Scope = "write:public_key"
- ScopeAdminPublicKey Scope = "admin:public_key"
- ScopeReadGPGKey Scope = "read:gpg_key"
- ScopeWriteGPGKey Scope = "write:gpg_key"
- ScopeAdminGPGKey Scope = "admin:gpg_key"
-)
-
-// AuthorizationsService handles communication with the authorization related
-// methods of the GitHub API.
-//
-// This service requires HTTP Basic Authentication; it cannot be accessed using
-// an OAuth token.
-//
-// GitHub API docs: https://developer.github.com/v3/oauth_authorizations/
-type AuthorizationsService service
-
-// Authorization represents an individual GitHub authorization.
-type Authorization struct {
- ID *int `json:"id,omitempty"`
- URL *string `json:"url,omitempty"`
- Scopes []Scope `json:"scopes,omitempty"`
- Token *string `json:"token,omitempty"`
- TokenLastEight *string `json:"token_last_eight,omitempty"`
- HashedToken *string `json:"hashed_token,omitempty"`
- App *AuthorizationApp `json:"app,omitempty"`
- Note *string `json:"note,omitempty"`
- NoteURL *string `json:"note_url,omitempty"`
- UpdateAt *Timestamp `json:"updated_at,omitempty"`
- CreatedAt *Timestamp `json:"created_at,omitempty"`
- Fingerprint *string `json:"fingerprint,omitempty"`
-
- // User is only populated by the Check and Reset methods.
- User *User `json:"user,omitempty"`
-}
-
-func (a Authorization) String() string {
- return Stringify(a)
-}
-
-// AuthorizationApp represents an individual GitHub app (in the context of authorization).
-type AuthorizationApp struct {
- URL *string `json:"url,omitempty"`
- Name *string `json:"name,omitempty"`
- ClientID *string `json:"client_id,omitempty"`
-}
-
-func (a AuthorizationApp) String() string {
- return Stringify(a)
-}
-
-// Grant represents an OAuth application that has been granted access to an account.
-type Grant struct {
- ID *int `json:"id,omitempty"`
- URL *string `json:"url,omitempty"`
- App *AuthorizationApp `json:"app,omitempty"`
- CreatedAt *Timestamp `json:"created_at,omitempty"`
- UpdatedAt *Timestamp `json:"updated_at,omitempty"`
- Scopes []string `json:"scopes,omitempty"`
-}
-
-func (g Grant) String() string {
- return Stringify(g)
-}
-
-// AuthorizationRequest represents a request to create an authorization.
-type AuthorizationRequest struct {
- Scopes []Scope `json:"scopes,omitempty"`
- Note *string `json:"note,omitempty"`
- NoteURL *string `json:"note_url,omitempty"`
- ClientID *string `json:"client_id,omitempty"`
- ClientSecret *string `json:"client_secret,omitempty"`
- Fingerprint *string `json:"fingerprint,omitempty"`
-}
-
-func (a AuthorizationRequest) String() string {
- return Stringify(a)
-}
-
-// AuthorizationUpdateRequest represents a request to update an authorization.
-//
-// Note that for any one update, you must only provide one of the "scopes"
-// fields. That is, you may provide only one of "Scopes", or "AddScopes", or
-// "RemoveScopes".
-//
-// GitHub API docs: https://developer.github.com/v3/oauth_authorizations/#update-an-existing-authorization
-type AuthorizationUpdateRequest struct {
- Scopes []string `json:"scopes,omitempty"`
- AddScopes []string `json:"add_scopes,omitempty"`
- RemoveScopes []string `json:"remove_scopes,omitempty"`
- Note *string `json:"note,omitempty"`
- NoteURL *string `json:"note_url,omitempty"`
- Fingerprint *string `json:"fingerprint,omitempty"`
-}
-
-func (a AuthorizationUpdateRequest) String() string {
- return Stringify(a)
-}
-
-// List the authorizations for the authenticated user.
-//
-// GitHub API docs: https://developer.github.com/v3/oauth_authorizations/#list-your-authorizations
-func (s *AuthorizationsService) List(opt *ListOptions) ([]*Authorization, *Response, error) {
- u := "authorizations"
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- auths := new([]*Authorization)
- resp, err := s.client.Do(req, auths)
- if err != nil {
- return nil, resp, err
- }
- return *auths, resp, err
-}
-
-// Get a single authorization.
-//
-// GitHub API docs: https://developer.github.com/v3/oauth_authorizations/#get-a-single-authorization
-func (s *AuthorizationsService) Get(id int) (*Authorization, *Response, error) {
- u := fmt.Sprintf("authorizations/%d", id)
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- a := new(Authorization)
- resp, err := s.client.Do(req, a)
- if err != nil {
- return nil, resp, err
- }
- return a, resp, err
-}
-
-// Create a new authorization for the specified OAuth application.
-//
-// GitHub API docs: https://developer.github.com/v3/oauth_authorizations/#create-a-new-authorization
-func (s *AuthorizationsService) Create(auth *AuthorizationRequest) (*Authorization, *Response, error) {
- u := "authorizations"
-
- req, err := s.client.NewRequest("POST", u, auth)
- if err != nil {
- return nil, nil, err
- }
-
- a := new(Authorization)
- resp, err := s.client.Do(req, a)
- if err != nil {
- return nil, resp, err
- }
- return a, resp, err
-}
-
-// GetOrCreateForApp creates a new authorization for the specified OAuth
-// application, only if an authorization for that application doesn’t already
-// exist for the user.
-//
-// If a new token is created, the HTTP status code will be "201 Created", and
-// the returned Authorization.Token field will be populated. If an existing
-// token is returned, the status code will be "200 OK" and the
-// Authorization.Token field will be empty.
-//
-// clientID is the OAuth Client ID with which to create the token.
-//
-// GitHub API docs:
-// - https://developer.github.com/v3/oauth_authorizations/#get-or-create-an-authorization-for-a-specific-app
-// - https://developer.github.com/v3/oauth_authorizations/#get-or-create-an-authorization-for-a-specific-app-and-fingerprint
-func (s *AuthorizationsService) GetOrCreateForApp(clientID string, auth *AuthorizationRequest) (*Authorization, *Response, error) {
- var u string
- if auth.Fingerprint == nil || *auth.Fingerprint == "" {
- u = fmt.Sprintf("authorizations/clients/%v", clientID)
- } else {
- u = fmt.Sprintf("authorizations/clients/%v/%v", clientID, *auth.Fingerprint)
- }
-
- req, err := s.client.NewRequest("PUT", u, auth)
- if err != nil {
- return nil, nil, err
- }
-
- a := new(Authorization)
- resp, err := s.client.Do(req, a)
- if err != nil {
- return nil, resp, err
- }
-
- return a, resp, err
-}
-
-// Edit a single authorization.
-//
-// GitHub API docs: https://developer.github.com/v3/oauth_authorizations/#update-an-existing-authorization
-func (s *AuthorizationsService) Edit(id int, auth *AuthorizationUpdateRequest) (*Authorization, *Response, error) {
- u := fmt.Sprintf("authorizations/%d", id)
-
- req, err := s.client.NewRequest("PATCH", u, auth)
- if err != nil {
- return nil, nil, err
- }
-
- a := new(Authorization)
- resp, err := s.client.Do(req, a)
- if err != nil {
- return nil, resp, err
- }
-
- return a, resp, err
-}
-
-// Delete a single authorization.
-//
-// GitHub API docs: https://developer.github.com/v3/oauth_authorizations/#delete-an-authorization
-func (s *AuthorizationsService) Delete(id int) (*Response, error) {
- u := fmt.Sprintf("authorizations/%d", id)
-
- req, err := s.client.NewRequest("DELETE", u, nil)
- if err != nil {
- return nil, err
- }
-
- return s.client.Do(req, nil)
-}
-
-// Check if an OAuth token is valid for a specific app.
-//
-// Note that this operation requires the use of BasicAuth, but where the
-// username is the OAuth application clientID, and the password is its
-// clientSecret. Invalid tokens will return a 404 Not Found.
-//
-// The returned Authorization.User field will be populated.
-//
-// GitHub API docs: https://developer.github.com/v3/oauth_authorizations/#check-an-authorization
-func (s *AuthorizationsService) Check(clientID string, token string) (*Authorization, *Response, error) {
- u := fmt.Sprintf("applications/%v/tokens/%v", clientID, token)
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- a := new(Authorization)
- resp, err := s.client.Do(req, a)
- if err != nil {
- return nil, resp, err
- }
-
- return a, resp, err
-}
-
-// Reset is used to reset a valid OAuth token without end user involvement.
-// Applications must save the "token" property in the response, because changes
-// take effect immediately.
-//
-// Note that this operation requires the use of BasicAuth, but where the
-// username is the OAuth application clientID, and the password is its
-// clientSecret. Invalid tokens will return a 404 Not Found.
-//
-// The returned Authorization.User field will be populated.
-//
-// GitHub API docs: https://developer.github.com/v3/oauth_authorizations/#reset-an-authorization
-func (s *AuthorizationsService) Reset(clientID string, token string) (*Authorization, *Response, error) {
- u := fmt.Sprintf("applications/%v/tokens/%v", clientID, token)
-
- req, err := s.client.NewRequest("POST", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- a := new(Authorization)
- resp, err := s.client.Do(req, a)
- if err != nil {
- return nil, resp, err
- }
-
- return a, resp, err
-}
-
-// Revoke an authorization for an application.
-//
-// Note that this operation requires the use of BasicAuth, but where the
-// username is the OAuth application clientID, and the password is its
-// clientSecret. Invalid tokens will return a 404 Not Found.
-//
-// GitHub API docs: https://developer.github.com/v3/oauth_authorizations/#revoke-an-authorization-for-an-application
-func (s *AuthorizationsService) Revoke(clientID string, token string) (*Response, error) {
- u := fmt.Sprintf("applications/%v/tokens/%v", clientID, token)
-
- req, err := s.client.NewRequest("DELETE", u, nil)
- if err != nil {
- return nil, err
- }
-
- return s.client.Do(req, nil)
-}
-
-// ListGrants lists the set of OAuth applications that have been granted
-// access to a user's account. This will return one entry for each application
-// that has been granted access to the account, regardless of the number of
-// tokens an application has generated for the user.
-//
-// GitHub API docs: https://developer.github.com/v3/oauth_authorizations/#list-your-grants
-func (s *AuthorizationsService) ListGrants() ([]*Grant, *Response, error) {
- req, err := s.client.NewRequest("GET", "applications/grants", nil)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeOAuthGrantAuthorizationsPreview)
-
- grants := []*Grant{}
- resp, err := s.client.Do(req, &grants)
- if err != nil {
- return nil, resp, err
- }
-
- return grants, resp, err
-}
-
-// GetGrant gets a single OAuth application grant.
-//
-// GitHub API docs: https://developer.github.com/v3/oauth_authorizations/#get-a-single-grant
-func (s *AuthorizationsService) GetGrant(id int) (*Grant, *Response, error) {
- u := fmt.Sprintf("applications/grants/%d", id)
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeOAuthGrantAuthorizationsPreview)
-
- grant := new(Grant)
- resp, err := s.client.Do(req, grant)
- if err != nil {
- return nil, resp, err
- }
-
- return grant, resp, err
-}
-
-// DeleteGrant deletes an OAuth application grant. Deleting an application's
-// grant will also delete all OAuth tokens associated with the application for
-// the user.
-//
-// GitHub API docs: https://developer.github.com/v3/oauth_authorizations/#delete-a-grant
-func (s *AuthorizationsService) DeleteGrant(id int) (*Response, error) {
- u := fmt.Sprintf("applications/grants/%d", id)
- req, err := s.client.NewRequest("DELETE", u, nil)
- if err != nil {
- return nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeOAuthGrantAuthorizationsPreview)
-
- return s.client.Do(req, nil)
-}
-
-// Create an impersonation OAuth token.
-//
-// This requires admin permissions. With the returned Authorization.Token
-// you can e.g. create or delete a user's public SSH key. NOTE: creating a
-// new token automatically revokes an existing one.
-//
-// GitHub API docs: https://developer.github.com/enterprise/2.5/v3/users/administration/#create-an-impersonation-oauth-token
-func (s *AuthorizationsService) CreateImpersonation(username string, authReq *AuthorizationRequest) (*Authorization, *Response, error) {
- u := fmt.Sprintf("admin/users/%v/authorizations", username)
- req, err := s.client.NewRequest("POST", u, authReq)
- if err != nil {
- return nil, nil, err
- }
-
- a := new(Authorization)
- resp, err := s.client.Do(req, a)
- if err != nil {
- return nil, resp, err
- }
- return a, resp, err
-}
-
-// Delete an impersonation OAuth token.
-//
-// NOTE: there can be only one at a time.
-//
-// GitHub API docs: https://developer.github.com/enterprise/2.5/v3/users/administration/#delete-an-impersonation-oauth-token
-func (s *AuthorizationsService) DeleteImpersonation(username string) (*Response, error) {
- u := fmt.Sprintf("admin/users/%v/authorizations", username)
- req, err := s.client.NewRequest("DELETE", u, nil)
- if err != nil {
- return nil, err
- }
-
- return s.client.Do(req, nil)
-}
diff --git a/vendor/src/github.com/google/go-github/github/authorizations_test.go b/vendor/src/github.com/google/go-github/github/authorizations_test.go
deleted file mode 100644
index 90a5324..0000000
--- a/vendor/src/github.com/google/go-github/github/authorizations_test.go
+++ /dev/null
@@ -1,343 +0,0 @@
-// Copyright 2015 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "encoding/json"
- "fmt"
- "net/http"
- "reflect"
- "testing"
-)
-
-func TestAuthorizationsService_List(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/authorizations", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testFormValues(t, r, values{"page": "1", "per_page": "2"})
- fmt.Fprint(w, `[{"id":1}]`)
- })
-
- opt := &ListOptions{Page: 1, PerPage: 2}
- got, _, err := client.Authorizations.List(opt)
- if err != nil {
- t.Errorf("Authorizations.List returned error: %v", err)
- }
-
- want := []*Authorization{{ID: Int(1)}}
- if !reflect.DeepEqual(got, want) {
- t.Errorf("Authorizations.List returned %+v, want %+v", *got[0].ID, *want[0].ID)
- }
-}
-
-func TestAuthorizationsService_Get(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/authorizations/1", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- fmt.Fprint(w, `{"id":1}`)
- })
-
- got, _, err := client.Authorizations.Get(1)
- if err != nil {
- t.Errorf("Authorizations.Get returned error: %v", err)
- }
-
- want := &Authorization{ID: Int(1)}
- if !reflect.DeepEqual(got, want) {
- t.Errorf("Authorizations.Get returned auth %+v, want %+v", got, want)
- }
-}
-
-func TestAuthorizationsService_Create(t *testing.T) {
- setup()
- defer teardown()
-
- input := &AuthorizationRequest{
- Note: String("test"),
- }
-
- mux.HandleFunc("/authorizations", func(w http.ResponseWriter, r *http.Request) {
- v := new(AuthorizationRequest)
- json.NewDecoder(r.Body).Decode(v)
-
- testMethod(t, r, "POST")
- if !reflect.DeepEqual(v, input) {
- t.Errorf("Request body = %+v, want %+v", v, input)
- }
-
- fmt.Fprint(w, `{"ID":1}`)
- })
-
- got, _, err := client.Authorizations.Create(input)
- if err != nil {
- t.Errorf("Authorizations.Create returned error: %v", err)
- }
-
- want := &Authorization{ID: Int(1)}
- if !reflect.DeepEqual(got, want) {
- t.Errorf("Authorization.Create returned %+v, want %+v", got, want)
- }
-}
-
-func TestAuthorizationsService_GetOrCreateForApp(t *testing.T) {
- setup()
- defer teardown()
-
- input := &AuthorizationRequest{
- Note: String("test"),
- }
-
- mux.HandleFunc("/authorizations/clients/id", func(w http.ResponseWriter, r *http.Request) {
- v := new(AuthorizationRequest)
- json.NewDecoder(r.Body).Decode(v)
-
- testMethod(t, r, "PUT")
- if !reflect.DeepEqual(v, input) {
- t.Errorf("Request body = %+v, want %+v", v, input)
- }
-
- fmt.Fprint(w, `{"ID":1}`)
- })
-
- got, _, err := client.Authorizations.GetOrCreateForApp("id", input)
- if err != nil {
- t.Errorf("Authorizations.GetOrCreateForApp returned error: %v", err)
- }
-
- want := &Authorization{ID: Int(1)}
- if !reflect.DeepEqual(got, want) {
- t.Errorf("Authorization.GetOrCreateForApp returned %+v, want %+v", got, want)
- }
-}
-
-func TestAuthorizationsService_GetOrCreateForApp_Fingerprint(t *testing.T) {
- setup()
- defer teardown()
-
- input := &AuthorizationRequest{
- Note: String("test"),
- Fingerprint: String("fp"),
- }
-
- mux.HandleFunc("/authorizations/clients/id/fp", func(w http.ResponseWriter, r *http.Request) {
- v := new(AuthorizationRequest)
- json.NewDecoder(r.Body).Decode(v)
-
- testMethod(t, r, "PUT")
- if !reflect.DeepEqual(v, input) {
- t.Errorf("Request body = %+v, want %+v", v, input)
- }
-
- fmt.Fprint(w, `{"ID":1}`)
- })
-
- got, _, err := client.Authorizations.GetOrCreateForApp("id", input)
- if err != nil {
- t.Errorf("Authorizations.GetOrCreateForApp returned error: %v", err)
- }
-
- want := &Authorization{ID: Int(1)}
- if !reflect.DeepEqual(got, want) {
- t.Errorf("Authorization.GetOrCreateForApp returned %+v, want %+v", got, want)
- }
-}
-
-func TestAuthorizationsService_Edit(t *testing.T) {
- setup()
- defer teardown()
-
- input := &AuthorizationUpdateRequest{
- Note: String("test"),
- }
-
- mux.HandleFunc("/authorizations/1", func(w http.ResponseWriter, r *http.Request) {
- v := new(AuthorizationUpdateRequest)
- json.NewDecoder(r.Body).Decode(v)
-
- testMethod(t, r, "PATCH")
- if !reflect.DeepEqual(v, input) {
- t.Errorf("Request body = %+v, want %+v", v, input)
- }
-
- fmt.Fprint(w, `{"ID":1}`)
- })
-
- got, _, err := client.Authorizations.Edit(1, input)
- if err != nil {
- t.Errorf("Authorizations.Edit returned error: %v", err)
- }
-
- want := &Authorization{ID: Int(1)}
- if !reflect.DeepEqual(got, want) {
- t.Errorf("Authorization.Update returned %+v, want %+v", got, want)
- }
-}
-
-func TestAuthorizationsService_Delete(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/authorizations/1", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "DELETE")
- w.WriteHeader(http.StatusNoContent)
- })
-
- _, err := client.Authorizations.Delete(1)
- if err != nil {
- t.Errorf("Authorizations.Delete returned error: %v", err)
- }
-}
-
-func TestAuthorizationsService_Check(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/applications/id/tokens/t", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- fmt.Fprint(w, `{"id":1}`)
- })
-
- got, _, err := client.Authorizations.Check("id", "t")
- if err != nil {
- t.Errorf("Authorizations.Check returned error: %v", err)
- }
-
- want := &Authorization{ID: Int(1)}
- if !reflect.DeepEqual(got, want) {
- t.Errorf("Authorizations.Check returned auth %+v, want %+v", got, want)
- }
-}
-
-func TestAuthorizationsService_Reset(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/applications/id/tokens/t", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "POST")
- fmt.Fprint(w, `{"ID":1}`)
- })
-
- got, _, err := client.Authorizations.Reset("id", "t")
- if err != nil {
- t.Errorf("Authorizations.Reset returned error: %v", err)
- }
-
- want := &Authorization{ID: Int(1)}
- if !reflect.DeepEqual(got, want) {
- t.Errorf("Authorizations.Reset returned auth %+v, want %+v", got, want)
- }
-}
-
-func TestAuthorizationsService_Revoke(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/applications/id/tokens/t", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "DELETE")
- w.WriteHeader(http.StatusNoContent)
- })
-
- _, err := client.Authorizations.Revoke("id", "t")
- if err != nil {
- t.Errorf("Authorizations.Revoke returned error: %v", err)
- }
-}
-
-func TestListGrants(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/applications/grants", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testHeader(t, r, "Accept", mediaTypeOAuthGrantAuthorizationsPreview)
- fmt.Fprint(w, `[{"id": 1}]`)
- })
-
- got, _, err := client.Authorizations.ListGrants()
- if err != nil {
- t.Errorf("OAuthAuthorizations.ListGrants returned error: %v", err)
- }
-
- want := []*Grant{{ID: Int(1)}}
- if !reflect.DeepEqual(got, want) {
- t.Errorf("OAuthAuthorizations.ListGrants = %+v, want %+v", got, want)
- }
-}
-
-func TestGetGrant(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/applications/grants/1", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testHeader(t, r, "Accept", mediaTypeOAuthGrantAuthorizationsPreview)
- fmt.Fprint(w, `{"id": 1}`)
- })
-
- got, _, err := client.Authorizations.GetGrant(1)
- if err != nil {
- t.Errorf("OAuthAuthorizations.GetGrant returned error: %v", err)
- }
-
- want := &Grant{ID: Int(1)}
- if !reflect.DeepEqual(got, want) {
- t.Errorf("OAuthAuthorizations.GetGrant = %+v, want %+v", got, want)
- }
-}
-
-func TestDeleteGrant(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/applications/grants/1", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "DELETE")
- testHeader(t, r, "Accept", mediaTypeOAuthGrantAuthorizationsPreview)
- })
-
- _, err := client.Authorizations.DeleteGrant(1)
- if err != nil {
- t.Errorf("OAuthAuthorizations.DeleteGrant returned error: %v", err)
- }
-}
-
-func TestAuthorizationsService_CreateImpersonation(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/admin/users/u/authorizations", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "POST")
- fmt.Fprint(w, `{"id":1}`)
- })
-
- req := &AuthorizationRequest{Scopes: []Scope{ScopePublicRepo}}
- got, _, err := client.Authorizations.CreateImpersonation("u", req)
- if err != nil {
- t.Errorf("Authorizations.CreateImpersonation returned error: %+v", err)
- }
-
- want := &Authorization{ID: Int(1)}
- if !reflect.DeepEqual(got, want) {
- t.Errorf("Authorizations.CreateImpersonation returned %+v, want %+v", *got.ID, *want.ID)
- }
-}
-
-func TestAuthorizationsService_DeleteImpersonation(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/admin/users/u/authorizations", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "DELETE")
- })
-
- _, err := client.Authorizations.DeleteImpersonation("u")
- if err != nil {
- t.Errorf("Authorizations.DeleteImpersonation returned error: %+v", err)
- }
-}
diff --git a/vendor/src/github.com/google/go-github/github/doc.go b/vendor/src/github.com/google/go-github/github/doc.go
deleted file mode 100644
index ba7b089..0000000
--- a/vendor/src/github.com/google/go-github/github/doc.go
+++ /dev/null
@@ -1,145 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-/*
-Package github provides a client for using the GitHub API.
-
-Usage:
-
- import "github.com/google/go-github/github"
-
-Construct a new GitHub client, then use the various services on the client to
-access different parts of the GitHub API. For example:
-
- client := github.NewClient(nil)
-
- // list all organizations for user "willnorris"
- orgs, _, err := client.Organizations.List("willnorris", nil)
-
-Some API methods have optional parameters that can be passed. For example:
-
- client := github.NewClient(nil)
-
- // list recently updated repositories for org "github"
- opt := &github.RepositoryListByOrgOptions{Sort: "updated"}
- repos, _, err := client.Repositories.ListByOrg("github", opt)
-
-The services of a client divide the API into logical chunks and correspond to
-the structure of the GitHub API documentation at
-http://developer.github.com/v3/.
-
-Authentication
-
-The go-github library does not directly handle authentication. Instead, when
-creating a new client, pass an http.Client that can handle authentication for
-you. The easiest and recommended way to do this is using the golang.org/x/oauth2
-library, but you can always use any other library that provides an http.Client.
-If you have an OAuth2 access token (for example, a personal API token), you can
-use it with the oauth2 library using:
-
- import "golang.org/x/oauth2"
-
- func main() {
- ts := oauth2.StaticTokenSource(
- &oauth2.Token{AccessToken: "... your access token ..."},
- )
- tc := oauth2.NewClient(oauth2.NoContext, ts)
-
- client := github.NewClient(tc)
-
- // list all repositories for the authenticated user
- repos, _, err := client.Repositories.List("", nil)
- }
-
-Note that when using an authenticated Client, all calls made by the client will
-include the specified OAuth token. Therefore, authenticated clients should
-almost never be shared between different users.
-
-See the oauth2 docs for complete instructions on using that library.
-
-For API methods that require HTTP Basic Authentication, use the
-BasicAuthTransport.
-
-Rate Limiting
-
-GitHub imposes a rate limit on all API clients. Unauthenticated clients are
-limited to 60 requests per hour, while authenticated clients can make up to
-5,000 requests per hour. To receive the higher rate limit when making calls
-that are not issued on behalf of a user, use the
-UnauthenticatedRateLimitedTransport.
-
-The Rate method on a client returns the rate limit information based on the most
-recent API call. This is updated on every call, but may be out of date if it's
-been some time since the last API call and other clients have made subsequent
-requests since then. You can always call RateLimits() directly to get the most
-up-to-date rate limit data for the client.
-
-To detect an API rate limit error, you can check if its type is *github.RateLimitError:
-
- repos, _, err := client.Repositories.List("", nil)
- if _, ok := err.(*github.RateLimitError); ok {
- log.Println("hit rate limit")
- }
-
-Learn more about GitHub rate limiting at
-http://developer.github.com/v3/#rate-limiting.
-
-Conditional Requests
-
-The GitHub API has good support for conditional requests which will help
-prevent you from burning through your rate limit, as well as help speed up your
-application. go-github does not handle conditional requests directly, but is
-instead designed to work with a caching http.Transport. We recommend using
-https://github.com/gregjones/httpcache for that.
-
-Learn more about GitHub conditional requests at
-https://developer.github.com/v3/#conditional-requests.
-
-Creating and Updating Resources
-
-All structs for GitHub resources use pointer values for all non-repeated fields.
-This allows distinguishing between unset fields and those set to a zero-value.
-Helper functions have been provided to easily create these pointers for string,
-bool, and int values. For example:
-
- // create a new private repository named "foo"
- repo := &github.Repository{
- Name: github.String("foo"),
- Private: github.Bool(true),
- }
- client.Repositories.Create("", repo)
-
-Users who have worked with protocol buffers should find this pattern familiar.
-
-Pagination
-
-All requests for resource collections (repos, pull requests, issues, etc.)
-support pagination. Pagination options are described in the
-github.ListOptions struct and passed to the list methods directly or as an
-embedded type of a more specific list options struct (for example
-github.PullRequestListOptions). Pages information is available via the
-github.Response struct.
-
- client := github.NewClient(nil)
-
- opt := &github.RepositoryListByOrgOptions{
- ListOptions: github.ListOptions{PerPage: 10},
- }
- // get all pages of results
- var allRepos []*github.Repository
- for {
- repos, resp, err := client.Repositories.ListByOrg("github", opt)
- if err != nil {
- return err
- }
- allRepos = append(allRepos, repos...)
- if resp.NextPage == 0 {
- break
- }
- opt.ListOptions.Page = resp.NextPage
- }
-
-*/
-package github
diff --git a/vendor/src/github.com/google/go-github/github/event_types.go b/vendor/src/github.com/google/go-github/github/event_types.go
deleted file mode 100644
index f3e163d..0000000
--- a/vendor/src/github.com/google/go-github/github/event_types.go
+++ /dev/null
@@ -1,467 +0,0 @@
-// Copyright 2016 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// These event types are shared between the Events API and used as Webhook payloads.
-
-package github
-
-// CommitCommentEvent is triggered when a commit comment is created.
-// The Webhook event name is "commit_comment".
-//
-// GitHub docs: https://developer.github.com/v3/activity/events/types/#commitcommentevent
-type CommitCommentEvent struct {
- Comment *RepositoryComment `json:"comment,omitempty"`
-
- // The following fields are only populated by Webhook events.
- Action *string `json:"action,omitempty"`
- Repo *Repository `json:"repository,omitempty"`
- Sender *User `json:"sender,omitempty"`
-}
-
-// CreateEvent represents a created repository, branch, or tag.
-// The Webhook event name is "create".
-//
-// Note: webhooks will not receive this event for created repositories.
-// Additionally, webhooks will not receive this event for tags if more
-// than three tags are pushed at once.
-//
-// GitHub docs: https://developer.github.com/v3/activity/events/types/#createevent
-type CreateEvent struct {
- Ref *string `json:"ref,omitempty"`
- // RefType is the object that was created. Possible values are: "repository", "branch", "tag".
- RefType *string `json:"ref_type,omitempty"`
- MasterBranch *string `json:"master_branch,omitempty"`
- Description *string `json:"description,omitempty"`
-
- // The following fields are only populated by Webhook events.
- PusherType *string `json:"pusher_type,omitempty"`
- Repo *Repository `json:"repository,omitempty"`
- Sender *User `json:"sender,omitempty"`
-}
-
-// DeleteEvent represents a deleted branch or tag.
-// The Webhook event name is "delete".
-//
-// Note: webhooks will not receive this event for tags if more than three tags
-// are deleted at once.
-//
-// GitHub docs: https://developer.github.com/v3/activity/events/types/#deleteevent
-type DeleteEvent struct {
- Ref *string `json:"ref,omitempty"`
- // RefType is the object that was deleted. Possible values are: "branch", "tag".
- RefType *string `json:"ref_type,omitempty"`
-
- // The following fields are only populated by Webhook events.
- PusherType *string `json:"pusher_type,omitempty"`
- Repo *Repository `json:"repository,omitempty"`
- Sender *User `json:"sender,omitempty"`
-}
-
-// DeploymentEvent represents a deployment.
-// The Webhook event name is "deployment".
-//
-// Events of this type are not visible in timelines, they are only used to trigger hooks.
-//
-// GitHub docs: https://developer.github.com/v3/activity/events/types/#deploymentevent
-type DeploymentEvent struct {
- Deployment *Deployment `json:"deployment,omitempty"`
- Repo *Repository `json:"repository,omitempty"`
-
- // The following fields are only populated by Webhook events.
- Sender *User `json:"sender,omitempty"`
-}
-
-// DeploymentStatusEvent represents a deployment status.
-// The Webhook event name is "deployment_status".
-//
-// Events of this type are not visible in timelines, they are only used to trigger hooks.
-//
-// GitHub docs: https://developer.github.com/v3/activity/events/types/#deploymentstatusevent
-type DeploymentStatusEvent struct {
- Deployment *Deployment `json:"deployment,omitempty"`
- DeploymentStatus *DeploymentStatus `json:"deployment_status,omitempty"`
- Repo *Repository `json:"repository,omitempty"`
-
- // The following fields are only populated by Webhook events.
- Sender *User `json:"sender,omitempty"`
-}
-
-// ForkEvent is triggered when a user forks a repository.
-// The Webhook event name is "fork".
-//
-// GitHub docs: https://developer.github.com/v3/activity/events/types/#forkevent
-type ForkEvent struct {
- // Forkee is the created repository.
- Forkee *Repository `json:"forkee,omitempty"`
-
- // The following fields are only populated by Webhook events.
- Repo *Repository `json:"repository,omitempty"`
- Sender *User `json:"sender,omitempty"`
-}
-
-// Page represents a single Wiki page.
-type Page struct {
- PageName *string `json:"page_name,omitempty"`
- Title *string `json:"title,omitempty"`
- Summary *string `json:"summary,omitempty"`
- Action *string `json:"action,omitempty"`
- SHA *string `json:"sha,omitempty"`
- HTMLURL *string `json:"html_url,omitempty"`
-}
-
-// GollumEvent is triggered when a Wiki page is created or updated.
-// The Webhook event name is "gollum".
-//
-// GitHub docs: https://developer.github.com/v3/activity/events/types/#gollumevent
-type GollumEvent struct {
- Pages []*Page `json:"pages,omitempty"`
-
- // The following fields are only populated by Webhook events.
- Repo *Repository `json:"repository,omitempty"`
- Sender *User `json:"sender,omitempty"`
-}
-
-// DEPRECATED: IssueActivityEvent represents the payload delivered by Issue webhook
-// Use IssuesEvent instead.
-type IssueActivityEvent struct {
- Action *string `json:"action,omitempty"`
- Issue *Issue `json:"issue,omitempty"`
-
- // The following fields are only populated by Webhook events.
- Repo *Repository `json:"repository,omitempty"`
- Sender *User `json:"sender,omitempty"`
-}
-
-// EditChange represents the changes when an issue, pull request, or comment has
-// been edited.
-type EditChange struct {
- Title *struct {
- From *string `json:"from,omitempty"`
- } `json:"title,omitempty"`
- Body *struct {
- From *string `json:"from,omitempty"`
- } `json:"body,omitempty"`
-}
-
-// IssueCommentEvent is triggered when an issue comment is created on an issue
-// or pull request.
-// The Webhook event name is "issue_comment".
-//
-// GitHub docs: https://developer.github.com/v3/activity/events/types/#issuecommentevent
-type IssueCommentEvent struct {
- // Action is the action that was performed on the comment.
- // Possible values are: "created", "edited", "deleted".
- Action *string `json:"action,omitempty"`
- Issue *Issue `json:"issue,omitempty"`
- Comment *IssueComment `json:"comment,omitempty"`
-
- // The following fields are only populated by Webhook events.
- Changes *EditChange `json:"changes,omitempty"`
- Repo *Repository `json:"repository,omitempty"`
- Sender *User `json:"sender,omitempty"`
-}
-
-// IssuesEvent is triggered when an issue is assigned, unassigned, labeled,
-// unlabeled, opened, closed, or reopened.
-// The Webhook event name is "issues".
-//
-// GitHub docs: https://developer.github.com/v3/activity/events/types/#issuesevent
-type IssuesEvent struct {
- // Action is the action that was performed. Possible values are: "assigned",
- // "unassigned", "labeled", "unlabeled", "opened", "closed", "reopened", "edited".
- Action *string `json:"action,omitempty"`
- Issue *Issue `json:"issue,omitempty"`
- Assignee *User `json:"assignee,omitempty"`
- Label *Label `json:"label,omitempty"`
-
- // The following fields are only populated by Webhook events.
- Changes *EditChange `json:"changes,omitempty"`
- Repo *Repository `json:"repository,omitempty"`
- Sender *User `json:"sender,omitempty"`
-}
-
-// MemberEvent is triggered when a user is added as a collaborator to a repository.
-// The Webhook event name is "member".
-//
-// GitHub docs: https://developer.github.com/v3/activity/events/types/#memberevent
-type MemberEvent struct {
- // Action is the action that was performed. Possible value is: "added".
- Action *string `json:"action,omitempty"`
- Member *User `json:"member,omitempty"`
-
- // The following fields are only populated by Webhook events.
- Repo *Repository `json:"repository,omitempty"`
- Sender *User `json:"sender,omitempty"`
-}
-
-// MembershipEvent is triggered when a user is added or removed from a team.
-// The Webhook event name is "membership".
-//
-// Events of this type are not visible in timelines, they are only used to
-// trigger organization webhooks.
-//
-// GitHub docs: https://developer.github.com/v3/activity/events/types/#membershipevent
-type MembershipEvent struct {
- // Action is the action that was performed. Possible values are: "added", "removed".
- Action *string `json:"action,omitempty"`
- // Scope is the scope of the membership. Possible value is: "team".
- Scope *string `json:"scope,omitempty"`
- Member *User `json:"member,omitempty"`
- Team *Team `json:"team,omitempty"`
-
- // The following fields are only populated by Webhook events.
- Org *Organization `json:"organization,omitempty"`
- Sender *User `json:"sender,omitempty"`
-}
-
-// PageBuildEvent represents an attempted build of a GitHub Pages site, whether
-// successful or not.
-// The Webhook event name is "page_build".
-//
-// This event is triggered on push to a GitHub Pages enabled branch (gh-pages
-// for project pages, master for user and organization pages).
-//
-// Events of this type are not visible in timelines, they are only used to trigger hooks.
-//
-// GitHub docs: https://developer.github.com/v3/activity/events/types/#pagebuildevent
-type PageBuildEvent struct {
- Build *PagesBuild `json:"build,omitempty"`
-
- // The following fields are only populated by Webhook events.
- ID *int `json:"id,omitempty"`
- Repo *Repository `json:"repository,omitempty"`
- Sender *User `json:"sender,omitempty"`
-}
-
-// PublicEvent is triggered when a private repository is open sourced.
-// According to GitHub: "Without a doubt: the best GitHub event."
-// The Webhook event name is "public".
-//
-// GitHub docs: https://developer.github.com/v3/activity/events/types/#publicevent
-type PublicEvent struct {
- // The following fields are only populated by Webhook events.
- Repo *Repository `json:"repository,omitempty"`
- Sender *User `json:"sender,omitempty"`
-}
-
-// PullRequestEvent is triggered when a pull request is assigned, unassigned,
-// labeled, unlabeled, opened, closed, reopened, or synchronized.
-// The Webhook event name is "pull_request".
-//
-// GitHub docs: https://developer.github.com/v3/activity/events/types/#pullrequestevent
-type PullRequestEvent struct {
- // Action is the action that was performed. Possible values are: "assigned",
- // "unassigned", "labeled", "unlabeled", "opened", "closed", or "reopened",
- // "synchronize", "edited". If the action is "closed" and the merged key is false,
- // the pull request was closed with unmerged commits. If the action is "closed"
- // and the merged key is true, the pull request was merged.
- Action *string `json:"action,omitempty"`
- Number *int `json:"number,omitempty"`
- PullRequest *PullRequest `json:"pull_request,omitempty"`
-
- // The following fields are only populated by Webhook events.
- Changes *EditChange `json:"changes,omitempty"`
- Repo *Repository `json:"repository,omitempty"`
- Sender *User `json:"sender,omitempty"`
-}
-
-// PullRequestReviewCommentEvent is triggered when a comment is created on a
-// portion of the unified diff of a pull request.
-// The Webhook event name is "pull_request_review_comment".
-//
-// GitHub docs: https://developer.github.com/v3/activity/events/types/#pullrequestreviewcommentevent
-type PullRequestReviewCommentEvent struct {
- // Action is the action that was performed on the comment.
- // Possible values are: "created", "edited", "deleted".
- Action *string `json:"action,omitempty"`
- PullRequest *PullRequest `json:"pull_request,omitempty"`
- Comment *PullRequestComment `json:"comment,omitempty"`
-
- // The following fields are only populated by Webhook events.
- Changes *EditChange `json:"changes,omitempty"`
- Repo *Repository `json:"repository,omitempty"`
- Sender *User `json:"sender,omitempty"`
-}
-
-// PushEvent represents a git push to a GitHub repository.
-//
-// GitHub API docs: http://developer.github.com/v3/activity/events/types/#pushevent
-type PushEvent struct {
- PushID *int `json:"push_id,omitempty"`
- Head *string `json:"head,omitempty"`
- Ref *string `json:"ref,omitempty"`
- Size *int `json:"size,omitempty"`
- Commits []PushEventCommit `json:"commits,omitempty"`
- Repo *PushEventRepository `json:"repository,omitempty"`
- Before *string `json:"before,omitempty"`
- DistinctSize *int `json:"distinct_size,omitempty"`
-
- // The following fields are only populated by Webhook events.
- After *string `json:"after,omitempty"`
- Created *bool `json:"created,omitempty"`
- Deleted *bool `json:"deleted,omitempty"`
- Forced *bool `json:"forced,omitempty"`
- BaseRef *string `json:"base_ref,omitempty"`
- Compare *string `json:"compare,omitempty"`
- HeadCommit *PushEventCommit `json:"head_commit,omitempty"`
- Pusher *User `json:"pusher,omitempty"`
- Sender *User `json:"sender,omitempty"`
-}
-
-func (p PushEvent) String() string {
- return Stringify(p)
-}
-
-// PushEventCommit represents a git commit in a GitHub PushEvent.
-type PushEventCommit struct {
- Message *string `json:"message,omitempty"`
- Author *CommitAuthor `json:"author,omitempty"`
- URL *string `json:"url,omitempty"`
- Distinct *bool `json:"distinct,omitempty"`
-
- // The following fields are only populated by Events API.
- SHA *string `json:"sha,omitempty"`
-
- // The following fields are only populated by Webhook events.
- ID *string `json:"id,omitempty"`
- TreeID *string `json:"tree_id,omitempty"`
- Timestamp *Timestamp `json:"timestamp,omitempty"`
- Committer *CommitAuthor `json:"committer,omitempty"`
- Added []string `json:"added,omitempty"`
- Removed []string `json:"removed,omitempty"`
- Modified []string `json:"modified,omitempty"`
-}
-
-func (p PushEventCommit) String() string {
- return Stringify(p)
-}
-
-// PushEventRepository represents the repo object in a PushEvent payload
-type PushEventRepository struct {
- ID *int `json:"id,omitempty"`
- Name *string `json:"name,omitempty"`
- FullName *string `json:"full_name,omitempty"`
- Owner *PushEventRepoOwner `json:"owner,omitempty"`
- Private *bool `json:"private,omitempty"`
- Description *string `json:"description,omitempty"`
- Fork *bool `json:"fork,omitempty"`
- CreatedAt *Timestamp `json:"created_at,omitempty"`
- PushedAt *Timestamp `json:"pushed_at,omitempty"`
- UpdatedAt *Timestamp `json:"updated_at,omitempty"`
- Homepage *string `json:"homepage,omitempty"`
- Size *int `json:"size,omitempty"`
- StargazersCount *int `json:"stargazers_count,omitempty"`
- WatchersCount *int `json:"watchers_count,omitempty"`
- Language *string `json:"language,omitempty"`
- HasIssues *bool `json:"has_issues,omitempty"`
- HasDownloads *bool `json:"has_downloads,omitempty"`
- HasWiki *bool `json:"has_wiki,omitempty"`
- HasPages *bool `json:"has_pages,omitempty"`
- ForksCount *int `json:"forks_count,omitempty"`
- OpenIssuesCount *int `json:"open_issues_count,omitempty"`
- DefaultBranch *string `json:"default_branch,omitempty"`
- MasterBranch *string `json:"master_branch,omitempty"`
- Organization *string `json:"organization,omitempty"`
-
- // The following fields are only populated by Webhook events.
- URL *string `json:"url,omitempty"`
- HTMLURL *string `json:"html_url,omitempty"`
-}
-
-// PushEventRepoOwner is a basic reporesntation of user/org in a PushEvent payload
-type PushEventRepoOwner struct {
- Name *string `json:"name,omitempty"`
- Email *string `json:"email,omitempty"`
-}
-
-// ReleaseEvent is triggered when a release is published.
-// The Webhook event name is "release".
-//
-// GitHub docs: https://developer.github.com/v3/activity/events/types/#releaseevent
-type ReleaseEvent struct {
- // Action is the action that was performed. Possible value is: "published".
- Action *string `json:"action,omitempty"`
- Release *RepositoryRelease `json:"release,omitempty"`
-
- // The following fields are only populated by Webhook events.
- Repo *Repository `json:"repository,omitempty"`
- Sender *User `json:"sender,omitempty"`
-}
-
-// RepositoryEvent is triggered when a repository is created.
-// The Webhook event name is "repository".
-//
-// Events of this type are not visible in timelines, they are only used to
-// trigger organization webhooks.
-//
-// GitHub docs: https://developer.github.com/v3/activity/events/types/#repositoryevent
-type RepositoryEvent struct {
- // Action is the action that was performed. Possible values are: "created", "deleted",
- // "publicized", "privatized".
- Action *string `json:"action,omitempty"`
- Repo *Repository `json:"repository,omitempty"`
-
- // The following fields are only populated by Webhook events.
- Org *Organization `json:"organization,omitempty"`
- Sender *User `json:"sender,omitempty"`
-}
-
-// StatusEvent is triggered when the status of a Git commit changes.
-// The Webhook event name is "status".
-//
-// Events of this type are not visible in timelines, they are only used to
-// trigger hooks.
-//
-// GitHub docs: https://developer.github.com/v3/activity/events/types/#statusevent
-type StatusEvent struct {
- SHA *string `json:"sha,omitempty"`
- // State is the new state. Possible values are: "pending", "success", "failure", "error".
- State *string `json:"state,omitempty"`
- Description *string `json:"description,omitempty"`
- TargetURL *string `json:"target_url,omitempty"`
- Branches []*Branch `json:"branches,omitempty"`
-
- // The following fields are only populated by Webhook events.
- ID *int `json:"id,omitempty"`
- Name *string `json:"name,omitempty"`
- Context *string `json:"context,omitempty"`
- Commit *PushEventCommit `json:"commit,omitempty"`
- CreatedAt *Timestamp `json:"created_at,omitempty"`
- UpdatedAt *Timestamp `json:"updated_at,omitempty"`
- Repo *Repository `json:"repository,omitempty"`
- Sender *User `json:"sender,omitempty"`
-}
-
-// TeamAddEvent is triggered when a repository is added to a team.
-// The Webhook event name is "team_add".
-//
-// Events of this type are not visible in timelines. These events are only used
-// to trigger hooks.
-//
-// GitHub docs: https://developer.github.com/v3/activity/events/types/#teamaddevent
-type TeamAddEvent struct {
- Team *Team `json:"team,omitempty"`
- Repo *Repository `json:"repository,omitempty"`
-
- // The following fields are only populated by Webhook events.
- Org *Organization `json:"organization,omitempty"`
- Sender *User `json:"sender,omitempty"`
-}
-
-// WatchEvent is related to starring a repository, not watching. See this API
-// blog post for an explanation: https://developer.github.com/changes/2012-09-05-watcher-api/
-//
-// The event’s actor is the user who starred a repository, and the event’s
-// repository is the repository that was starred.
-//
-// GitHub docs: https://developer.github.com/v3/activity/events/types/#watchevent
-type WatchEvent struct {
- // Action is the action that was performed. Possible value is: "started".
- Action *string `json:"action,omitempty"`
-
- // The following fields are only populated by Webhook events.
- Repo *Repository `json:"repository,omitempty"`
- Sender *User `json:"sender,omitempty"`
-}
diff --git a/vendor/src/github.com/google/go-github/github/examples_test.go b/vendor/src/github.com/google/go-github/github/examples_test.go
deleted file mode 100644
index 7b754cd..0000000
--- a/vendor/src/github.com/google/go-github/github/examples_test.go
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright 2016 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github_test
-
-import (
- "fmt"
- "log"
-
- "github.com/google/go-github/github"
-)
-
-func ExampleClient_Markdown() {
- client := github.NewClient(nil)
-
- input := "# heading #\n\nLink to issue #1"
- opt := &github.MarkdownOptions{Mode: "gfm", Context: "google/go-github"}
-
- output, _, err := client.Markdown(input, opt)
- if err != nil {
- fmt.Println(err)
- }
-
- fmt.Println(output)
-}
-
-func ExampleRepositoriesService_GetReadme() {
- client := github.NewClient(nil)
-
- readme, _, err := client.Repositories.GetReadme("google", "go-github", nil)
- if err != nil {
- fmt.Println(err)
- return
- }
-
- content, err := readme.GetContent()
- if err != nil {
- fmt.Println(err)
- return
- }
-
- fmt.Printf("google/go-github README:\n%v\n", content)
-}
-
-func ExampleRepositoriesService_List() {
- client := github.NewClient(nil)
-
- user := "willnorris"
- opt := &github.RepositoryListOptions{Type: "owner", Sort: "updated", Direction: "desc"}
-
- repos, _, err := client.Repositories.List(user, opt)
- if err != nil {
- fmt.Println(err)
- }
-
- fmt.Printf("Recently updated repositories by %q: %v", user, github.Stringify(repos))
-}
-
-func ExampleUsersService_ListAll() {
- client := github.NewClient(nil)
- opts := &github.UserListOptions{}
- for {
- users, _, err := client.Users.ListAll(opts)
- if err != nil {
- log.Fatalf("error listing users: %v", err)
- }
- if len(users) == 0 {
- break
- }
- opts.Since = *users[len(users)-1].ID
- // Process users...
- }
-}
diff --git a/vendor/src/github.com/google/go-github/github/gists.go b/vendor/src/github.com/google/go-github/github/gists.go
deleted file mode 100644
index 697fcb5..0000000
--- a/vendor/src/github.com/google/go-github/github/gists.go
+++ /dev/null
@@ -1,343 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "fmt"
- "time"
-)
-
-// GistsService handles communication with the Gist related
-// methods of the GitHub API.
-//
-// GitHub API docs: http://developer.github.com/v3/gists/
-type GistsService service
-
-// Gist represents a GitHub's gist.
-type Gist struct {
- ID *string `json:"id,omitempty"`
- Description *string `json:"description,omitempty"`
- Public *bool `json:"public,omitempty"`
- Owner *User `json:"owner,omitempty"`
- Files map[GistFilename]GistFile `json:"files,omitempty"`
- Comments *int `json:"comments,omitempty"`
- HTMLURL *string `json:"html_url,omitempty"`
- GitPullURL *string `json:"git_pull_url,omitempty"`
- GitPushURL *string `json:"git_push_url,omitempty"`
- CreatedAt *time.Time `json:"created_at,omitempty"`
- UpdatedAt *time.Time `json:"updated_at,omitempty"`
-}
-
-func (g Gist) String() string {
- return Stringify(g)
-}
-
-// GistFilename represents filename on a gist.
-type GistFilename string
-
-// GistFile represents a file on a gist.
-type GistFile struct {
- Size *int `json:"size,omitempty"`
- Filename *string `json:"filename,omitempty"`
- RawURL *string `json:"raw_url,omitempty"`
- Content *string `json:"content,omitempty"`
-}
-
-func (g GistFile) String() string {
- return Stringify(g)
-}
-
-// GistCommit represents a commit on a gist.
-type GistCommit struct {
- URL *string `json:"url,omitempty"`
- Version *string `json:"version,omitempty"`
- User *User `json:"user,omitempty"`
- ChangeStatus *CommitStats `json:"change_status,omitempty"`
- CommitedAt *Timestamp `json:"commited_at,omitempty"`
-}
-
-func (gc GistCommit) String() string {
- return Stringify(gc)
-}
-
-// GistFork represents a fork of a gist.
-type GistFork struct {
- URL *string `json:"url,omitempty"`
- User *User `json:"user,omitempty"`
- ID *string `json:"id,omitempty"`
- CreatedAt *Timestamp `json:"created_at,omitempty"`
- UpdatedAt *Timestamp `json:"updated_at,omitempty"`
-}
-
-func (gf GistFork) String() string {
- return Stringify(gf)
-}
-
-// GistListOptions specifies the optional parameters to the
-// GistsService.List, GistsService.ListAll, and GistsService.ListStarred methods.
-type GistListOptions struct {
- // Since filters Gists by time.
- Since time.Time `url:"since,omitempty"`
-
- ListOptions
-}
-
-// List gists for a user. Passing the empty string will list
-// all public gists if called anonymously. However, if the call
-// is authenticated, it will returns all gists for the authenticated
-// user.
-//
-// GitHub API docs: http://developer.github.com/v3/gists/#list-gists
-func (s *GistsService) List(user string, opt *GistListOptions) ([]*Gist, *Response, error) {
- var u string
- if user != "" {
- u = fmt.Sprintf("users/%v/gists", user)
- } else {
- u = "gists"
- }
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- gists := new([]*Gist)
- resp, err := s.client.Do(req, gists)
- if err != nil {
- return nil, resp, err
- }
-
- return *gists, resp, err
-}
-
-// ListAll lists all public gists.
-//
-// GitHub API docs: http://developer.github.com/v3/gists/#list-gists
-func (s *GistsService) ListAll(opt *GistListOptions) ([]*Gist, *Response, error) {
- u, err := addOptions("gists/public", opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- gists := new([]*Gist)
- resp, err := s.client.Do(req, gists)
- if err != nil {
- return nil, resp, err
- }
-
- return *gists, resp, err
-}
-
-// ListStarred lists starred gists of authenticated user.
-//
-// GitHub API docs: http://developer.github.com/v3/gists/#list-gists
-func (s *GistsService) ListStarred(opt *GistListOptions) ([]*Gist, *Response, error) {
- u, err := addOptions("gists/starred", opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- gists := new([]*Gist)
- resp, err := s.client.Do(req, gists)
- if err != nil {
- return nil, resp, err
- }
-
- return *gists, resp, err
-}
-
-// Get a single gist.
-//
-// GitHub API docs: http://developer.github.com/v3/gists/#get-a-single-gist
-func (s *GistsService) Get(id string) (*Gist, *Response, error) {
- u := fmt.Sprintf("gists/%v", id)
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
- gist := new(Gist)
- resp, err := s.client.Do(req, gist)
- if err != nil {
- return nil, resp, err
- }
-
- return gist, resp, err
-}
-
-// GetRevision gets a specific revision of a gist.
-//
-// GitHub API docs: https://developer.github.com/v3/gists/#get-a-specific-revision-of-a-gist
-func (s *GistsService) GetRevision(id, sha string) (*Gist, *Response, error) {
- u := fmt.Sprintf("gists/%v/%v", id, sha)
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
- gist := new(Gist)
- resp, err := s.client.Do(req, gist)
- if err != nil {
- return nil, resp, err
- }
-
- return gist, resp, err
-}
-
-// Create a gist for authenticated user.
-//
-// GitHub API docs: http://developer.github.com/v3/gists/#create-a-gist
-func (s *GistsService) Create(gist *Gist) (*Gist, *Response, error) {
- u := "gists"
- req, err := s.client.NewRequest("POST", u, gist)
- if err != nil {
- return nil, nil, err
- }
- g := new(Gist)
- resp, err := s.client.Do(req, g)
- if err != nil {
- return nil, resp, err
- }
-
- return g, resp, err
-}
-
-// Edit a gist.
-//
-// GitHub API docs: http://developer.github.com/v3/gists/#edit-a-gist
-func (s *GistsService) Edit(id string, gist *Gist) (*Gist, *Response, error) {
- u := fmt.Sprintf("gists/%v", id)
- req, err := s.client.NewRequest("PATCH", u, gist)
- if err != nil {
- return nil, nil, err
- }
- g := new(Gist)
- resp, err := s.client.Do(req, g)
- if err != nil {
- return nil, resp, err
- }
-
- return g, resp, err
-}
-
-// ListCommits lists commits of a gist.
-//
-// Github API docs: https://developer.github.com/v3/gists/#list-gist-commits
-func (s *GistsService) ListCommits(id string) ([]*GistCommit, *Response, error) {
- u := fmt.Sprintf("gists/%v/commits", id)
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- gistCommits := new([]*GistCommit)
- resp, err := s.client.Do(req, gistCommits)
- if err != nil {
- return nil, resp, err
- }
-
- return *gistCommits, resp, err
-}
-
-// Delete a gist.
-//
-// GitHub API docs: http://developer.github.com/v3/gists/#delete-a-gist
-func (s *GistsService) Delete(id string) (*Response, error) {
- u := fmt.Sprintf("gists/%v", id)
- req, err := s.client.NewRequest("DELETE", u, nil)
- if err != nil {
- return nil, err
- }
- return s.client.Do(req, nil)
-}
-
-// Star a gist on behalf of authenticated user.
-//
-// GitHub API docs: http://developer.github.com/v3/gists/#star-a-gist
-func (s *GistsService) Star(id string) (*Response, error) {
- u := fmt.Sprintf("gists/%v/star", id)
- req, err := s.client.NewRequest("PUT", u, nil)
- if err != nil {
- return nil, err
- }
- return s.client.Do(req, nil)
-}
-
-// Unstar a gist on a behalf of authenticated user.
-//
-// Github API docs: http://developer.github.com/v3/gists/#unstar-a-gist
-func (s *GistsService) Unstar(id string) (*Response, error) {
- u := fmt.Sprintf("gists/%v/star", id)
- req, err := s.client.NewRequest("DELETE", u, nil)
- if err != nil {
- return nil, err
- }
- return s.client.Do(req, nil)
-}
-
-// IsStarred checks if a gist is starred by authenticated user.
-//
-// GitHub API docs: http://developer.github.com/v3/gists/#check-if-a-gist-is-starred
-func (s *GistsService) IsStarred(id string) (bool, *Response, error) {
- u := fmt.Sprintf("gists/%v/star", id)
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return false, nil, err
- }
- resp, err := s.client.Do(req, nil)
- starred, err := parseBoolResponse(err)
- return starred, resp, err
-}
-
-// Fork a gist.
-//
-// GitHub API docs: http://developer.github.com/v3/gists/#fork-a-gist
-func (s *GistsService) Fork(id string) (*Gist, *Response, error) {
- u := fmt.Sprintf("gists/%v/forks", id)
- req, err := s.client.NewRequest("POST", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- g := new(Gist)
- resp, err := s.client.Do(req, g)
- if err != nil {
- return nil, resp, err
- }
-
- return g, resp, err
-}
-
-// ListForks lists forks of a gist.
-//
-// Github API docs: https://developer.github.com/v3/gists/#list-gist-forks
-func (s *GistsService) ListForks(id string) ([]*GistFork, *Response, error) {
- u := fmt.Sprintf("gists/%v/forks", id)
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- gistForks := new([]*GistFork)
- resp, err := s.client.Do(req, gistForks)
- if err != nil {
- return nil, resp, err
- }
-
- return *gistForks, resp, err
-}
diff --git a/vendor/src/github.com/google/go-github/github/gists_comments.go b/vendor/src/github.com/google/go-github/github/gists_comments.go
deleted file mode 100644
index 95a7fc7..0000000
--- a/vendor/src/github.com/google/go-github/github/gists_comments.go
+++ /dev/null
@@ -1,118 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "fmt"
- "time"
-)
-
-// GistComment represents a Gist comment.
-type GistComment struct {
- ID *int `json:"id,omitempty"`
- URL *string `json:"url,omitempty"`
- Body *string `json:"body,omitempty"`
- User *User `json:"user,omitempty"`
- CreatedAt *time.Time `json:"created_at,omitempty"`
-}
-
-func (g GistComment) String() string {
- return Stringify(g)
-}
-
-// ListComments lists all comments for a gist.
-//
-// GitHub API docs: http://developer.github.com/v3/gists/comments/#list-comments-on-a-gist
-func (s *GistsService) ListComments(gistID string, opt *ListOptions) ([]*GistComment, *Response, error) {
- u := fmt.Sprintf("gists/%v/comments", gistID)
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- comments := new([]*GistComment)
- resp, err := s.client.Do(req, comments)
- if err != nil {
- return nil, resp, err
- }
-
- return *comments, resp, err
-}
-
-// GetComment retrieves a single comment from a gist.
-//
-// GitHub API docs: http://developer.github.com/v3/gists/comments/#get-a-single-comment
-func (s *GistsService) GetComment(gistID string, commentID int) (*GistComment, *Response, error) {
- u := fmt.Sprintf("gists/%v/comments/%v", gistID, commentID)
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- c := new(GistComment)
- resp, err := s.client.Do(req, c)
- if err != nil {
- return nil, resp, err
- }
-
- return c, resp, err
-}
-
-// CreateComment creates a comment for a gist.
-//
-// GitHub API docs: http://developer.github.com/v3/gists/comments/#create-a-comment
-func (s *GistsService) CreateComment(gistID string, comment *GistComment) (*GistComment, *Response, error) {
- u := fmt.Sprintf("gists/%v/comments", gistID)
- req, err := s.client.NewRequest("POST", u, comment)
- if err != nil {
- return nil, nil, err
- }
-
- c := new(GistComment)
- resp, err := s.client.Do(req, c)
- if err != nil {
- return nil, resp, err
- }
-
- return c, resp, err
-}
-
-// EditComment edits an existing gist comment.
-//
-// GitHub API docs: http://developer.github.com/v3/gists/comments/#edit-a-comment
-func (s *GistsService) EditComment(gistID string, commentID int, comment *GistComment) (*GistComment, *Response, error) {
- u := fmt.Sprintf("gists/%v/comments/%v", gistID, commentID)
- req, err := s.client.NewRequest("PATCH", u, comment)
- if err != nil {
- return nil, nil, err
- }
-
- c := new(GistComment)
- resp, err := s.client.Do(req, c)
- if err != nil {
- return nil, resp, err
- }
-
- return c, resp, err
-}
-
-// DeleteComment deletes a gist comment.
-//
-// GitHub API docs: http://developer.github.com/v3/gists/comments/#delete-a-comment
-func (s *GistsService) DeleteComment(gistID string, commentID int) (*Response, error) {
- u := fmt.Sprintf("gists/%v/comments/%v", gistID, commentID)
- req, err := s.client.NewRequest("DELETE", u, nil)
- if err != nil {
- return nil, err
- }
-
- return s.client.Do(req, nil)
-}
diff --git a/vendor/src/github.com/google/go-github/github/gists_comments_test.go b/vendor/src/github.com/google/go-github/github/gists_comments_test.go
deleted file mode 100644
index eacd89f..0000000
--- a/vendor/src/github.com/google/go-github/github/gists_comments_test.go
+++ /dev/null
@@ -1,153 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "encoding/json"
- "fmt"
- "net/http"
- "reflect"
- "testing"
-)
-
-func TestGistsService_ListComments(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/gists/1/comments", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testFormValues(t, r, values{"page": "2"})
- fmt.Fprint(w, `[{"id": 1}]`)
- })
-
- opt := &ListOptions{Page: 2}
- comments, _, err := client.Gists.ListComments("1", opt)
- if err != nil {
- t.Errorf("Gists.Comments returned error: %v", err)
- }
-
- want := []*GistComment{{ID: Int(1)}}
- if !reflect.DeepEqual(comments, want) {
- t.Errorf("Gists.ListComments returned %+v, want %+v", comments, want)
- }
-}
-
-func TestGistsService_ListComments_invalidID(t *testing.T) {
- _, _, err := client.Gists.ListComments("%", nil)
- testURLParseError(t, err)
-}
-
-func TestGistsService_GetComment(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/gists/1/comments/2", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- fmt.Fprint(w, `{"id": 1}`)
- })
-
- comment, _, err := client.Gists.GetComment("1", 2)
- if err != nil {
- t.Errorf("Gists.GetComment returned error: %v", err)
- }
-
- want := &GistComment{ID: Int(1)}
- if !reflect.DeepEqual(comment, want) {
- t.Errorf("Gists.GetComment returned %+v, want %+v", comment, want)
- }
-}
-
-func TestGistsService_GetComment_invalidID(t *testing.T) {
- _, _, err := client.Gists.GetComment("%", 1)
- testURLParseError(t, err)
-}
-
-func TestGistsService_CreateComment(t *testing.T) {
- setup()
- defer teardown()
-
- input := &GistComment{ID: Int(1), Body: String("b")}
-
- mux.HandleFunc("/gists/1/comments", func(w http.ResponseWriter, r *http.Request) {
- v := new(GistComment)
- json.NewDecoder(r.Body).Decode(v)
-
- testMethod(t, r, "POST")
- if !reflect.DeepEqual(v, input) {
- t.Errorf("Request body = %+v, want %+v", v, input)
- }
-
- fmt.Fprint(w, `{"id":1}`)
- })
-
- comment, _, err := client.Gists.CreateComment("1", input)
- if err != nil {
- t.Errorf("Gists.CreateComment returned error: %v", err)
- }
-
- want := &GistComment{ID: Int(1)}
- if !reflect.DeepEqual(comment, want) {
- t.Errorf("Gists.CreateComment returned %+v, want %+v", comment, want)
- }
-}
-
-func TestGistsService_CreateComment_invalidID(t *testing.T) {
- _, _, err := client.Gists.CreateComment("%", nil)
- testURLParseError(t, err)
-}
-
-func TestGistsService_EditComment(t *testing.T) {
- setup()
- defer teardown()
-
- input := &GistComment{ID: Int(1), Body: String("b")}
-
- mux.HandleFunc("/gists/1/comments/2", func(w http.ResponseWriter, r *http.Request) {
- v := new(GistComment)
- json.NewDecoder(r.Body).Decode(v)
-
- testMethod(t, r, "PATCH")
- if !reflect.DeepEqual(v, input) {
- t.Errorf("Request body = %+v, want %+v", v, input)
- }
-
- fmt.Fprint(w, `{"id":1}`)
- })
-
- comment, _, err := client.Gists.EditComment("1", 2, input)
- if err != nil {
- t.Errorf("Gists.EditComment returned error: %v", err)
- }
-
- want := &GistComment{ID: Int(1)}
- if !reflect.DeepEqual(comment, want) {
- t.Errorf("Gists.EditComment returned %+v, want %+v", comment, want)
- }
-}
-
-func TestGistsService_EditComment_invalidID(t *testing.T) {
- _, _, err := client.Gists.EditComment("%", 1, nil)
- testURLParseError(t, err)
-}
-
-func TestGistsService_DeleteComment(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/gists/1/comments/2", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "DELETE")
- })
-
- _, err := client.Gists.DeleteComment("1", 2)
- if err != nil {
- t.Errorf("Gists.Delete returned error: %v", err)
- }
-}
-
-func TestGistsService_DeleteComment_invalidID(t *testing.T) {
- _, err := client.Gists.DeleteComment("%", 1)
- testURLParseError(t, err)
-}
diff --git a/vendor/src/github.com/google/go-github/github/gists_test.go b/vendor/src/github.com/google/go-github/github/gists_test.go
deleted file mode 100644
index 0b081ae..0000000
--- a/vendor/src/github.com/google/go-github/github/gists_test.go
+++ /dev/null
@@ -1,488 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "encoding/json"
- "fmt"
- "net/http"
- "reflect"
- "testing"
- "time"
-)
-
-func TestGistsService_List_specifiedUser(t *testing.T) {
- setup()
- defer teardown()
-
- since := "2013-01-01T00:00:00Z"
-
- mux.HandleFunc("/users/u/gists", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testFormValues(t, r, values{
- "since": since,
- })
- fmt.Fprint(w, `[{"id": "1"}]`)
- })
-
- opt := &GistListOptions{Since: time.Date(2013, time.January, 1, 0, 0, 0, 0, time.UTC)}
- gists, _, err := client.Gists.List("u", opt)
- if err != nil {
- t.Errorf("Gists.List returned error: %v", err)
- }
-
- want := []*Gist{{ID: String("1")}}
- if !reflect.DeepEqual(gists, want) {
- t.Errorf("Gists.List returned %+v, want %+v", gists, want)
- }
-}
-
-func TestGistsService_List_authenticatedUser(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/gists", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- fmt.Fprint(w, `[{"id": "1"}]`)
- })
-
- gists, _, err := client.Gists.List("", nil)
- if err != nil {
- t.Errorf("Gists.List returned error: %v", err)
- }
-
- want := []*Gist{{ID: String("1")}}
- if !reflect.DeepEqual(gists, want) {
- t.Errorf("Gists.List returned %+v, want %+v", gists, want)
- }
-}
-
-func TestGistsService_List_invalidUser(t *testing.T) {
- _, _, err := client.Gists.List("%", nil)
- testURLParseError(t, err)
-}
-
-func TestGistsService_ListAll(t *testing.T) {
- setup()
- defer teardown()
-
- since := "2013-01-01T00:00:00Z"
-
- mux.HandleFunc("/gists/public", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testFormValues(t, r, values{
- "since": since,
- })
- fmt.Fprint(w, `[{"id": "1"}]`)
- })
-
- opt := &GistListOptions{Since: time.Date(2013, time.January, 1, 0, 0, 0, 0, time.UTC)}
- gists, _, err := client.Gists.ListAll(opt)
- if err != nil {
- t.Errorf("Gists.ListAll returned error: %v", err)
- }
-
- want := []*Gist{{ID: String("1")}}
- if !reflect.DeepEqual(gists, want) {
- t.Errorf("Gists.ListAll returned %+v, want %+v", gists, want)
- }
-}
-
-func TestGistsService_ListStarred(t *testing.T) {
- setup()
- defer teardown()
-
- since := "2013-01-01T00:00:00Z"
-
- mux.HandleFunc("/gists/starred", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testFormValues(t, r, values{
- "since": since,
- })
- fmt.Fprint(w, `[{"id": "1"}]`)
- })
-
- opt := &GistListOptions{Since: time.Date(2013, time.January, 1, 0, 0, 0, 0, time.UTC)}
- gists, _, err := client.Gists.ListStarred(opt)
- if err != nil {
- t.Errorf("Gists.ListStarred returned error: %v", err)
- }
-
- want := []*Gist{{ID: String("1")}}
- if !reflect.DeepEqual(gists, want) {
- t.Errorf("Gists.ListStarred returned %+v, want %+v", gists, want)
- }
-}
-
-func TestGistsService_Get(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/gists/1", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- fmt.Fprint(w, `{"id": "1"}`)
- })
-
- gist, _, err := client.Gists.Get("1")
- if err != nil {
- t.Errorf("Gists.Get returned error: %v", err)
- }
-
- want := &Gist{ID: String("1")}
- if !reflect.DeepEqual(gist, want) {
- t.Errorf("Gists.Get returned %+v, want %+v", gist, want)
- }
-}
-
-func TestGistsService_Get_invalidID(t *testing.T) {
- _, _, err := client.Gists.Get("%")
- testURLParseError(t, err)
-}
-
-func TestGistsService_GetRevision(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/gists/1/s", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- fmt.Fprint(w, `{"id": "1"}`)
- })
-
- gist, _, err := client.Gists.GetRevision("1", "s")
- if err != nil {
- t.Errorf("Gists.Get returned error: %v", err)
- }
-
- want := &Gist{ID: String("1")}
- if !reflect.DeepEqual(gist, want) {
- t.Errorf("Gists.Get returned %+v, want %+v", gist, want)
- }
-}
-
-func TestGistsService_GetRevision_invalidID(t *testing.T) {
- _, _, err := client.Gists.GetRevision("%", "%")
- testURLParseError(t, err)
-}
-
-func TestGistsService_Create(t *testing.T) {
- setup()
- defer teardown()
-
- input := &Gist{
- Description: String("Gist description"),
- Public: Bool(false),
- Files: map[GistFilename]GistFile{
- "test.txt": {Content: String("Gist file content")},
- },
- }
-
- mux.HandleFunc("/gists", func(w http.ResponseWriter, r *http.Request) {
- v := new(Gist)
- json.NewDecoder(r.Body).Decode(v)
-
- testMethod(t, r, "POST")
- if !reflect.DeepEqual(v, input) {
- t.Errorf("Request body = %+v, want %+v", v, input)
- }
-
- fmt.Fprint(w,
- `
- {
- "id": "1",
- "description": "Gist description",
- "public": false,
- "files": {
- "test.txt": {
- "filename": "test.txt"
- }
- }
- }`)
- })
-
- gist, _, err := client.Gists.Create(input)
- if err != nil {
- t.Errorf("Gists.Create returned error: %v", err)
- }
-
- want := &Gist{
- ID: String("1"),
- Description: String("Gist description"),
- Public: Bool(false),
- Files: map[GistFilename]GistFile{
- "test.txt": {Filename: String("test.txt")},
- },
- }
- if !reflect.DeepEqual(gist, want) {
- t.Errorf("Gists.Create returned %+v, want %+v", gist, want)
- }
-}
-
-func TestGistsService_Edit(t *testing.T) {
- setup()
- defer teardown()
-
- input := &Gist{
- Description: String("New description"),
- Files: map[GistFilename]GistFile{
- "new.txt": {Content: String("new file content")},
- },
- }
-
- mux.HandleFunc("/gists/1", func(w http.ResponseWriter, r *http.Request) {
- v := new(Gist)
- json.NewDecoder(r.Body).Decode(v)
-
- testMethod(t, r, "PATCH")
- if !reflect.DeepEqual(v, input) {
- t.Errorf("Request body = %+v, want %+v", v, input)
- }
-
- fmt.Fprint(w,
- `
- {
- "id": "1",
- "description": "new description",
- "public": false,
- "files": {
- "test.txt": {
- "filename": "test.txt"
- },
- "new.txt": {
- "filename": "new.txt"
- }
- }
- }`)
- })
-
- gist, _, err := client.Gists.Edit("1", input)
- if err != nil {
- t.Errorf("Gists.Edit returned error: %v", err)
- }
-
- want := &Gist{
- ID: String("1"),
- Description: String("new description"),
- Public: Bool(false),
- Files: map[GistFilename]GistFile{
- "test.txt": {Filename: String("test.txt")},
- "new.txt": {Filename: String("new.txt")},
- },
- }
- if !reflect.DeepEqual(gist, want) {
- t.Errorf("Gists.Edit returned %+v, want %+v", gist, want)
- }
-}
-
-func TestGistsService_Edit_invalidID(t *testing.T) {
- _, _, err := client.Gists.Edit("%", nil)
- testURLParseError(t, err)
-}
-
-func TestGistsService_ListCommits(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/gists/1/commits", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testFormValues(t, r, nil)
- fmt.Fprint(w, `
- [
- {
- "url": "https://api.github.com/gists/1/1",
- "version": "1",
- "user": {
- "id": 1
- },
- "change_status": {
- "deletions": 0,
- "additions": 180,
- "total": 180
- },
- "commited_at": "2010-01-01T00:00:00Z"
- }
- ]
- `)
- })
-
- gistCommits, _, err := client.Gists.ListCommits("1")
- if err != nil {
- t.Errorf("Gists.ListCommits returned error: %v", err)
- }
-
- want := []*GistCommit{{
- URL: String("https://api.github.com/gists/1/1"),
- Version: String("1"),
- User: &User{ID: Int(1)},
- CommitedAt: &Timestamp{time.Date(2010, 1, 1, 00, 00, 00, 0, time.UTC)},
- ChangeStatus: &CommitStats{
- Additions: Int(180),
- Deletions: Int(0),
- Total: Int(180),
- }}}
-
- if !reflect.DeepEqual(gistCommits, want) {
- t.Errorf("Gists.ListCommits returned %+v, want %+v", gistCommits, want)
- }
-}
-
-func TestGistsService_Delete(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/gists/1", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "DELETE")
- })
-
- _, err := client.Gists.Delete("1")
- if err != nil {
- t.Errorf("Gists.Delete returned error: %v", err)
- }
-}
-
-func TestGistsService_Delete_invalidID(t *testing.T) {
- _, err := client.Gists.Delete("%")
- testURLParseError(t, err)
-}
-
-func TestGistsService_Star(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/gists/1/star", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "PUT")
- })
-
- _, err := client.Gists.Star("1")
- if err != nil {
- t.Errorf("Gists.Star returned error: %v", err)
- }
-}
-
-func TestGistsService_Star_invalidID(t *testing.T) {
- _, err := client.Gists.Star("%")
- testURLParseError(t, err)
-}
-
-func TestGistsService_Unstar(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/gists/1/star", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "DELETE")
- })
-
- _, err := client.Gists.Unstar("1")
- if err != nil {
- t.Errorf("Gists.Unstar returned error: %v", err)
- }
-}
-
-func TestGistsService_Unstar_invalidID(t *testing.T) {
- _, err := client.Gists.Unstar("%")
- testURLParseError(t, err)
-}
-
-func TestGistsService_IsStarred_hasStar(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/gists/1/star", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- w.WriteHeader(http.StatusNoContent)
- })
-
- star, _, err := client.Gists.IsStarred("1")
- if err != nil {
- t.Errorf("Gists.Starred returned error: %v", err)
- }
- if want := true; star != want {
- t.Errorf("Gists.Starred returned %+v, want %+v", star, want)
- }
-}
-
-func TestGistsService_IsStarred_noStar(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/gists/1/star", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- w.WriteHeader(http.StatusNotFound)
- })
-
- star, _, err := client.Gists.IsStarred("1")
- if err != nil {
- t.Errorf("Gists.Starred returned error: %v", err)
- }
- if want := false; star != want {
- t.Errorf("Gists.Starred returned %+v, want %+v", star, want)
- }
-}
-
-func TestGistsService_IsStarred_invalidID(t *testing.T) {
- _, _, err := client.Gists.IsStarred("%")
- testURLParseError(t, err)
-}
-
-func TestGistsService_Fork(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/gists/1/forks", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "POST")
- fmt.Fprint(w, `{"id": "2"}`)
- })
-
- gist, _, err := client.Gists.Fork("1")
- if err != nil {
- t.Errorf("Gists.Fork returned error: %v", err)
- }
-
- want := &Gist{ID: String("2")}
- if !reflect.DeepEqual(gist, want) {
- t.Errorf("Gists.Fork returned %+v, want %+v", gist, want)
- }
-}
-
-func TestGistsService_ListForks(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/gists/1/forks", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testFormValues(t, r, nil)
- fmt.Fprint(w, `
- [
- {"url": "https://api.github.com/gists/1",
- "user": {"id": 1},
- "id": "1",
- "created_at": "2010-01-01T00:00:00Z",
- "updated_at": "2013-01-01T00:00:00Z"
- }
- ]
- `)
- })
-
- gistForks, _, err := client.Gists.ListForks("1")
- if err != nil {
- t.Errorf("Gists.ListForks returned error: %v", err)
- }
-
- want := []*GistFork{{
- URL: String("https://api.github.com/gists/1"),
- ID: String("1"),
- User: &User{ID: Int(1)},
- CreatedAt: &Timestamp{time.Date(2010, 1, 1, 00, 00, 00, 0, time.UTC)},
- UpdatedAt: &Timestamp{time.Date(2013, 1, 1, 00, 00, 00, 0, time.UTC)}}}
-
- if !reflect.DeepEqual(gistForks, want) {
- t.Errorf("Gists.ListForks returned %+v, want %+v", gistForks, want)
- }
-}
-
-func TestGistsService_Fork_invalidID(t *testing.T) {
- _, _, err := client.Gists.Fork("%")
- testURLParseError(t, err)
-}
diff --git a/vendor/src/github.com/google/go-github/github/git.go b/vendor/src/github.com/google/go-github/github/git.go
deleted file mode 100644
index c934751..0000000
--- a/vendor/src/github.com/google/go-github/github/git.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-// GitService handles communication with the git data related
-// methods of the GitHub API.
-//
-// GitHub API docs: http://developer.github.com/v3/git/
-type GitService service
diff --git a/vendor/src/github.com/google/go-github/github/git_blobs.go b/vendor/src/github.com/google/go-github/github/git_blobs.go
deleted file mode 100644
index 55148fd..0000000
--- a/vendor/src/github.com/google/go-github/github/git_blobs.go
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import "fmt"
-
-// Blob represents a blob object.
-type Blob struct {
- Content *string `json:"content,omitempty"`
- Encoding *string `json:"encoding,omitempty"`
- SHA *string `json:"sha,omitempty"`
- Size *int `json:"size,omitempty"`
- URL *string `json:"url,omitempty"`
-}
-
-// GetBlob fetchs a blob from a repo given a SHA.
-//
-// GitHub API docs: http://developer.github.com/v3/git/blobs/#get-a-blob
-func (s *GitService) GetBlob(owner string, repo string, sha string) (*Blob, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/git/blobs/%v", owner, repo, sha)
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- blob := new(Blob)
- resp, err := s.client.Do(req, blob)
- return blob, resp, err
-}
-
-// CreateBlob creates a blob object.
-//
-// GitHub API docs: https://developer.github.com/v3/git/blobs/#create-a-blob
-func (s *GitService) CreateBlob(owner string, repo string, blob *Blob) (*Blob, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/git/blobs", owner, repo)
- req, err := s.client.NewRequest("POST", u, blob)
- if err != nil {
- return nil, nil, err
- }
-
- t := new(Blob)
- resp, err := s.client.Do(req, t)
- return t, resp, err
-}
diff --git a/vendor/src/github.com/google/go-github/github/git_blobs_test.go b/vendor/src/github.com/google/go-github/github/git_blobs_test.go
deleted file mode 100644
index 9530e16..0000000
--- a/vendor/src/github.com/google/go-github/github/git_blobs_test.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2014 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "encoding/json"
- "fmt"
- "net/http"
- "reflect"
- "testing"
-)
-
-func TestGitService_GetBlob(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/git/blobs/s", func(w http.ResponseWriter, r *http.Request) {
- if m := "GET"; m != r.Method {
- t.Errorf("Request method = %v, want %v", r.Method, m)
- }
- fmt.Fprint(w, `{
- "sha": "s",
- "content": "blob content"
- }`)
- })
-
- blob, _, err := client.Git.GetBlob("o", "r", "s")
- if err != nil {
- t.Errorf("Git.GetBlob returned error: %v", err)
- }
-
- want := Blob{
- SHA: String("s"),
- Content: String("blob content"),
- }
-
- if !reflect.DeepEqual(*blob, want) {
- t.Errorf("Blob.Get returned %+v, want %+v", *blob, want)
- }
-}
-
-func TestGitService_GetBlob_invalidOwner(t *testing.T) {
- _, _, err := client.Git.GetBlob("%", "%", "%")
- testURLParseError(t, err)
-}
-
-func TestGitService_CreateBlob(t *testing.T) {
- setup()
- defer teardown()
-
- input := &Blob{
- SHA: String("s"),
- Content: String("blob content"),
- Encoding: String("utf-8"),
- Size: Int(12),
- }
-
- mux.HandleFunc("/repos/o/r/git/blobs", func(w http.ResponseWriter, r *http.Request) {
- v := new(Blob)
- json.NewDecoder(r.Body).Decode(v)
-
- if m := "POST"; m != r.Method {
- t.Errorf("Request method = %v, want %v", r.Method, m)
- }
-
- want := input
- if !reflect.DeepEqual(v, want) {
- t.Errorf("Git.CreateBlob request body: %+v, want %+v", v, want)
- }
-
- fmt.Fprint(w, `{
- "sha": "s",
- "content": "blob content",
- "encoding": "utf-8",
- "size": 12
- }`)
- })
-
- blob, _, err := client.Git.CreateBlob("o", "r", input)
- if err != nil {
- t.Errorf("Git.CreateBlob returned error: %v", err)
- }
-
- want := input
-
- if !reflect.DeepEqual(*blob, *want) {
- t.Errorf("Git.CreateBlob returned %+v, want %+v", *blob, *want)
- }
-}
-
-func TestGitService_CreateBlob_invalidOwner(t *testing.T) {
- _, _, err := client.Git.CreateBlob("%", "%", &Blob{})
- testURLParseError(t, err)
-}
diff --git a/vendor/src/github.com/google/go-github/github/git_commits.go b/vendor/src/github.com/google/go-github/github/git_commits.go
deleted file mode 100644
index 0bcad41..0000000
--- a/vendor/src/github.com/google/go-github/github/git_commits.go
+++ /dev/null
@@ -1,127 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "fmt"
- "time"
-)
-
-// SignatureVerification represents GPG signature verification.
-type SignatureVerification struct {
- Verified *bool `json:"verified,omitempty"`
- Reason *string `json:"reason,omitempty"`
- Signature *string `json:"signature,omitempty"`
- Payload *string `json:"payload,omitempty"`
-}
-
-// Commit represents a GitHub commit.
-type Commit struct {
- SHA *string `json:"sha,omitempty"`
- Author *CommitAuthor `json:"author,omitempty"`
- Committer *CommitAuthor `json:"committer,omitempty"`
- Message *string `json:"message,omitempty"`
- Tree *Tree `json:"tree,omitempty"`
- Parents []Commit `json:"parents,omitempty"`
- Stats *CommitStats `json:"stats,omitempty"`
- URL *string `json:"url,omitempty"`
- Verification *SignatureVerification `json:"verification,omitempty"`
-
- // CommentCount is the number of GitHub comments on the commit. This
- // is only populated for requests that fetch GitHub data like
- // Pulls.ListCommits, Repositories.ListCommits, etc.
- CommentCount *int `json:"comment_count,omitempty"`
-}
-
-func (c Commit) String() string {
- return Stringify(c)
-}
-
-// CommitAuthor represents the author or committer of a commit. The commit
-// author may not correspond to a GitHub User.
-type CommitAuthor struct {
- Date *time.Time `json:"date,omitempty"`
- Name *string `json:"name,omitempty"`
- Email *string `json:"email,omitempty"`
-
- // The following fields are only populated by Webhook events.
- Login *string `json:"username,omitempty"` // Renamed for go-github consistency.
-}
-
-func (c CommitAuthor) String() string {
- return Stringify(c)
-}
-
-// GetCommit fetchs the Commit object for a given SHA.
-//
-// GitHub API docs: http://developer.github.com/v3/git/commits/#get-a-commit
-func (s *GitService) GetCommit(owner string, repo string, sha string) (*Commit, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/git/commits/%v", owner, repo, sha)
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeGitSigningPreview)
-
- c := new(Commit)
- resp, err := s.client.Do(req, c)
- if err != nil {
- return nil, resp, err
- }
-
- return c, resp, err
-}
-
-// createCommit represents the body of a CreateCommit request.
-type createCommit struct {
- Author *CommitAuthor `json:"author,omitempty"`
- Committer *CommitAuthor `json:"committer,omitempty"`
- Message *string `json:"message,omitempty"`
- Tree *string `json:"tree,omitempty"`
- Parents []string `json:"parents,omitempty"`
-}
-
-// CreateCommit creates a new commit in a repository.
-//
-// The commit.Committer is optional and will be filled with the commit.Author
-// data if omitted. If the commit.Author is omitted, it will be filled in with
-// the authenticated user’s information and the current date.
-//
-// GitHub API docs: http://developer.github.com/v3/git/commits/#create-a-commit
-func (s *GitService) CreateCommit(owner string, repo string, commit *Commit) (*Commit, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/git/commits", owner, repo)
-
- body := &createCommit{}
- if commit != nil {
- parents := make([]string, len(commit.Parents))
- for i, parent := range commit.Parents {
- parents[i] = *parent.SHA
- }
-
- body = &createCommit{
- Author: commit.Author,
- Committer: commit.Committer,
- Message: commit.Message,
- Tree: commit.Tree.SHA,
- Parents: parents,
- }
- }
-
- req, err := s.client.NewRequest("POST", u, body)
- if err != nil {
- return nil, nil, err
- }
-
- c := new(Commit)
- resp, err := s.client.Do(req, c)
- if err != nil {
- return nil, resp, err
- }
-
- return c, resp, err
-}
diff --git a/vendor/src/github.com/google/go-github/github/git_commits_test.go b/vendor/src/github.com/google/go-github/github/git_commits_test.go
deleted file mode 100644
index 566ac4f..0000000
--- a/vendor/src/github.com/google/go-github/github/git_commits_test.go
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "encoding/json"
- "fmt"
- "net/http"
- "reflect"
- "testing"
-)
-
-func TestGitService_GetCommit(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/git/commits/s", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testHeader(t, r, "Accept", mediaTypeGitSigningPreview)
- fmt.Fprint(w, `{"sha":"s","message":"m","author":{"name":"n"}}`)
- })
-
- commit, _, err := client.Git.GetCommit("o", "r", "s")
- if err != nil {
- t.Errorf("Git.GetCommit returned error: %v", err)
- }
-
- want := &Commit{SHA: String("s"), Message: String("m"), Author: &CommitAuthor{Name: String("n")}}
- if !reflect.DeepEqual(commit, want) {
- t.Errorf("Git.GetCommit returned %+v, want %+v", commit, want)
- }
-}
-
-func TestGitService_GetCommit_invalidOwner(t *testing.T) {
- _, _, err := client.Git.GetCommit("%", "%", "%")
- testURLParseError(t, err)
-}
-
-func TestGitService_CreateCommit(t *testing.T) {
- setup()
- defer teardown()
-
- input := &Commit{
- Message: String("m"),
- Tree: &Tree{SHA: String("t")},
- Parents: []Commit{{SHA: String("p")}},
- }
-
- mux.HandleFunc("/repos/o/r/git/commits", func(w http.ResponseWriter, r *http.Request) {
- v := new(createCommit)
- json.NewDecoder(r.Body).Decode(v)
-
- testMethod(t, r, "POST")
-
- want := &createCommit{
- Message: input.Message,
- Tree: String("t"),
- Parents: []string{"p"},
- }
- if !reflect.DeepEqual(v, want) {
- t.Errorf("Request body = %+v, want %+v", v, want)
- }
- fmt.Fprint(w, `{"sha":"s"}`)
- })
-
- commit, _, err := client.Git.CreateCommit("o", "r", input)
- if err != nil {
- t.Errorf("Git.CreateCommit returned error: %v", err)
- }
-
- want := &Commit{SHA: String("s")}
- if !reflect.DeepEqual(commit, want) {
- t.Errorf("Git.CreateCommit returned %+v, want %+v", commit, want)
- }
-}
-
-func TestGitService_CreateCommit_invalidOwner(t *testing.T) {
- _, _, err := client.Git.CreateCommit("%", "%", nil)
- testURLParseError(t, err)
-}
diff --git a/vendor/src/github.com/google/go-github/github/git_refs.go b/vendor/src/github.com/google/go-github/github/git_refs.go
deleted file mode 100644
index 16cbd6b..0000000
--- a/vendor/src/github.com/google/go-github/github/git_refs.go
+++ /dev/null
@@ -1,162 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "fmt"
- "strings"
-)
-
-// Reference represents a GitHub reference.
-type Reference struct {
- Ref *string `json:"ref"`
- URL *string `json:"url"`
- Object *GitObject `json:"object"`
-}
-
-func (r Reference) String() string {
- return Stringify(r)
-}
-
-// GitObject represents a Git object.
-type GitObject struct {
- Type *string `json:"type"`
- SHA *string `json:"sha"`
- URL *string `json:"url"`
-}
-
-func (o GitObject) String() string {
- return Stringify(o)
-}
-
-// createRefRequest represents the payload for creating a reference.
-type createRefRequest struct {
- Ref *string `json:"ref"`
- SHA *string `json:"sha"`
-}
-
-// updateRefRequest represents the payload for updating a reference.
-type updateRefRequest struct {
- SHA *string `json:"sha"`
- Force *bool `json:"force"`
-}
-
-// GetRef fetches the Reference object for a given Git ref.
-//
-// GitHub API docs: http://developer.github.com/v3/git/refs/#get-a-reference
-func (s *GitService) GetRef(owner string, repo string, ref string) (*Reference, *Response, error) {
- ref = strings.TrimPrefix(ref, "refs/")
- u := fmt.Sprintf("repos/%v/%v/git/refs/%v", owner, repo, ref)
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- r := new(Reference)
- resp, err := s.client.Do(req, r)
- if err != nil {
- return nil, resp, err
- }
-
- return r, resp, err
-}
-
-// ReferenceListOptions specifies optional parameters to the
-// GitService.ListRefs method.
-type ReferenceListOptions struct {
- Type string `url:"-"`
-
- ListOptions
-}
-
-// ListRefs lists all refs in a repository.
-//
-// GitHub API docs: http://developer.github.com/v3/git/refs/#get-all-references
-func (s *GitService) ListRefs(owner, repo string, opt *ReferenceListOptions) ([]*Reference, *Response, error) {
- var u string
- if opt != nil && opt.Type != "" {
- u = fmt.Sprintf("repos/%v/%v/git/refs/%v", owner, repo, opt.Type)
- } else {
- u = fmt.Sprintf("repos/%v/%v/git/refs", owner, repo)
- }
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- var rs []*Reference
- resp, err := s.client.Do(req, &rs)
- if err != nil {
- return nil, resp, err
- }
-
- return rs, resp, err
-}
-
-// CreateRef creates a new ref in a repository.
-//
-// GitHub API docs: http://developer.github.com/v3/git/refs/#create-a-reference
-func (s *GitService) CreateRef(owner string, repo string, ref *Reference) (*Reference, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/git/refs", owner, repo)
- req, err := s.client.NewRequest("POST", u, &createRefRequest{
- // back-compat with previous behavior that didn't require 'refs/' prefix
- Ref: String("refs/" + strings.TrimPrefix(*ref.Ref, "refs/")),
- SHA: ref.Object.SHA,
- })
- if err != nil {
- return nil, nil, err
- }
-
- r := new(Reference)
- resp, err := s.client.Do(req, r)
- if err != nil {
- return nil, resp, err
- }
-
- return r, resp, err
-}
-
-// UpdateRef updates an existing ref in a repository.
-//
-// GitHub API docs: http://developer.github.com/v3/git/refs/#update-a-reference
-func (s *GitService) UpdateRef(owner string, repo string, ref *Reference, force bool) (*Reference, *Response, error) {
- refPath := strings.TrimPrefix(*ref.Ref, "refs/")
- u := fmt.Sprintf("repos/%v/%v/git/refs/%v", owner, repo, refPath)
- req, err := s.client.NewRequest("PATCH", u, &updateRefRequest{
- SHA: ref.Object.SHA,
- Force: &force,
- })
- if err != nil {
- return nil, nil, err
- }
-
- r := new(Reference)
- resp, err := s.client.Do(req, r)
- if err != nil {
- return nil, resp, err
- }
-
- return r, resp, err
-}
-
-// DeleteRef deletes a ref from a repository.
-//
-// GitHub API docs: http://developer.github.com/v3/git/refs/#delete-a-reference
-func (s *GitService) DeleteRef(owner string, repo string, ref string) (*Response, error) {
- ref = strings.TrimPrefix(ref, "refs/")
- u := fmt.Sprintf("repos/%v/%v/git/refs/%v", owner, repo, ref)
- req, err := s.client.NewRequest("DELETE", u, nil)
- if err != nil {
- return nil, err
- }
-
- return s.client.Do(req, nil)
-}
diff --git a/vendor/src/github.com/google/go-github/github/git_refs_test.go b/vendor/src/github.com/google/go-github/github/git_refs_test.go
deleted file mode 100644
index cc4cd5a..0000000
--- a/vendor/src/github.com/google/go-github/github/git_refs_test.go
+++ /dev/null
@@ -1,280 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "encoding/json"
- "fmt"
- "net/http"
- "reflect"
- "testing"
-)
-
-func TestGitService_GetRef(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/git/refs/heads/b", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- fmt.Fprint(w, `
- {
- "ref": "refs/heads/b",
- "url": "https://api.github.com/repos/o/r/git/refs/heads/b",
- "object": {
- "type": "commit",
- "sha": "aa218f56b14c9653891f9e74264a383fa43fefbd",
- "url": "https://api.github.com/repos/o/r/git/commits/aa218f56b14c9653891f9e74264a383fa43fefbd"
- }
- }`)
- })
-
- ref, _, err := client.Git.GetRef("o", "r", "refs/heads/b")
- if err != nil {
- t.Errorf("Git.GetRef returned error: %v", err)
- }
-
- want := &Reference{
- Ref: String("refs/heads/b"),
- URL: String("https://api.github.com/repos/o/r/git/refs/heads/b"),
- Object: &GitObject{
- Type: String("commit"),
- SHA: String("aa218f56b14c9653891f9e74264a383fa43fefbd"),
- URL: String("https://api.github.com/repos/o/r/git/commits/aa218f56b14c9653891f9e74264a383fa43fefbd"),
- },
- }
- if !reflect.DeepEqual(ref, want) {
- t.Errorf("Git.GetRef returned %+v, want %+v", ref, want)
- }
-
- // without 'refs/' prefix
- if _, _, err := client.Git.GetRef("o", "r", "heads/b"); err != nil {
- t.Errorf("Git.GetRef returned error: %v", err)
- }
-}
-
-func TestGitService_ListRefs(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/git/refs", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- fmt.Fprint(w, `
- [
- {
- "ref": "refs/heads/branchA",
- "url": "https://api.github.com/repos/o/r/git/refs/heads/branchA",
- "object": {
- "type": "commit",
- "sha": "aa218f56b14c9653891f9e74264a383fa43fefbd",
- "url": "https://api.github.com/repos/o/r/git/commits/aa218f56b14c9653891f9e74264a383fa43fefbd"
- }
- },
- {
- "ref": "refs/heads/branchB",
- "url": "https://api.github.com/repos/o/r/git/refs/heads/branchB",
- "object": {
- "type": "commit",
- "sha": "aa218f56b14c9653891f9e74264a383fa43fefbd",
- "url": "https://api.github.com/repos/o/r/git/commits/aa218f56b14c9653891f9e74264a383fa43fefbd"
- }
- }
- ]`)
- })
-
- refs, _, err := client.Git.ListRefs("o", "r", nil)
- if err != nil {
- t.Errorf("Git.ListRefs returned error: %v", err)
- }
-
- want := []*Reference{
- {
- Ref: String("refs/heads/branchA"),
- URL: String("https://api.github.com/repos/o/r/git/refs/heads/branchA"),
- Object: &GitObject{
- Type: String("commit"),
- SHA: String("aa218f56b14c9653891f9e74264a383fa43fefbd"),
- URL: String("https://api.github.com/repos/o/r/git/commits/aa218f56b14c9653891f9e74264a383fa43fefbd"),
- },
- },
- {
- Ref: String("refs/heads/branchB"),
- URL: String("https://api.github.com/repos/o/r/git/refs/heads/branchB"),
- Object: &GitObject{
- Type: String("commit"),
- SHA: String("aa218f56b14c9653891f9e74264a383fa43fefbd"),
- URL: String("https://api.github.com/repos/o/r/git/commits/aa218f56b14c9653891f9e74264a383fa43fefbd"),
- },
- },
- }
- if !reflect.DeepEqual(refs, want) {
- t.Errorf("Git.ListRefs returned %+v, want %+v", refs, want)
- }
-}
-
-func TestGitService_ListRefs_options(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/git/refs/t", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testFormValues(t, r, values{"page": "2"})
- fmt.Fprint(w, `[{"ref": "r"}]`)
- })
-
- opt := &ReferenceListOptions{Type: "t", ListOptions: ListOptions{Page: 2}}
- refs, _, err := client.Git.ListRefs("o", "r", opt)
- if err != nil {
- t.Errorf("Git.ListRefs returned error: %v", err)
- }
-
- want := []*Reference{{Ref: String("r")}}
- if !reflect.DeepEqual(refs, want) {
- t.Errorf("Git.ListRefs returned %+v, want %+v", refs, want)
- }
-}
-
-func TestGitService_CreateRef(t *testing.T) {
- setup()
- defer teardown()
-
- args := &createRefRequest{
- Ref: String("refs/heads/b"),
- SHA: String("aa218f56b14c9653891f9e74264a383fa43fefbd"),
- }
-
- mux.HandleFunc("/repos/o/r/git/refs", func(w http.ResponseWriter, r *http.Request) {
- v := new(createRefRequest)
- json.NewDecoder(r.Body).Decode(v)
-
- testMethod(t, r, "POST")
- if !reflect.DeepEqual(v, args) {
- t.Errorf("Request body = %+v, want %+v", v, args)
- }
- fmt.Fprint(w, `
- {
- "ref": "refs/heads/b",
- "url": "https://api.github.com/repos/o/r/git/refs/heads/b",
- "object": {
- "type": "commit",
- "sha": "aa218f56b14c9653891f9e74264a383fa43fefbd",
- "url": "https://api.github.com/repos/o/r/git/commits/aa218f56b14c9653891f9e74264a383fa43fefbd"
- }
- }`)
- })
-
- ref, _, err := client.Git.CreateRef("o", "r", &Reference{
- Ref: String("refs/heads/b"),
- Object: &GitObject{
- SHA: String("aa218f56b14c9653891f9e74264a383fa43fefbd"),
- },
- })
- if err != nil {
- t.Errorf("Git.CreateRef returned error: %v", err)
- }
-
- want := &Reference{
- Ref: String("refs/heads/b"),
- URL: String("https://api.github.com/repos/o/r/git/refs/heads/b"),
- Object: &GitObject{
- Type: String("commit"),
- SHA: String("aa218f56b14c9653891f9e74264a383fa43fefbd"),
- URL: String("https://api.github.com/repos/o/r/git/commits/aa218f56b14c9653891f9e74264a383fa43fefbd"),
- },
- }
- if !reflect.DeepEqual(ref, want) {
- t.Errorf("Git.CreateRef returned %+v, want %+v", ref, want)
- }
-
- // without 'refs/' prefix
- _, _, err = client.Git.CreateRef("o", "r", &Reference{
- Ref: String("heads/b"),
- Object: &GitObject{
- SHA: String("aa218f56b14c9653891f9e74264a383fa43fefbd"),
- },
- })
- if err != nil {
- t.Errorf("Git.CreateRef returned error: %v", err)
- }
-}
-
-func TestGitService_UpdateRef(t *testing.T) {
- setup()
- defer teardown()
-
- args := &updateRefRequest{
- SHA: String("aa218f56b14c9653891f9e74264a383fa43fefbd"),
- Force: Bool(true),
- }
-
- mux.HandleFunc("/repos/o/r/git/refs/heads/b", func(w http.ResponseWriter, r *http.Request) {
- v := new(updateRefRequest)
- json.NewDecoder(r.Body).Decode(v)
-
- testMethod(t, r, "PATCH")
- if !reflect.DeepEqual(v, args) {
- t.Errorf("Request body = %+v, want %+v", v, args)
- }
- fmt.Fprint(w, `
- {
- "ref": "refs/heads/b",
- "url": "https://api.github.com/repos/o/r/git/refs/heads/b",
- "object": {
- "type": "commit",
- "sha": "aa218f56b14c9653891f9e74264a383fa43fefbd",
- "url": "https://api.github.com/repos/o/r/git/commits/aa218f56b14c9653891f9e74264a383fa43fefbd"
- }
- }`)
- })
-
- ref, _, err := client.Git.UpdateRef("o", "r", &Reference{
- Ref: String("refs/heads/b"),
- Object: &GitObject{SHA: String("aa218f56b14c9653891f9e74264a383fa43fefbd")},
- }, true)
- if err != nil {
- t.Errorf("Git.UpdateRef returned error: %v", err)
- }
-
- want := &Reference{
- Ref: String("refs/heads/b"),
- URL: String("https://api.github.com/repos/o/r/git/refs/heads/b"),
- Object: &GitObject{
- Type: String("commit"),
- SHA: String("aa218f56b14c9653891f9e74264a383fa43fefbd"),
- URL: String("https://api.github.com/repos/o/r/git/commits/aa218f56b14c9653891f9e74264a383fa43fefbd"),
- },
- }
- if !reflect.DeepEqual(ref, want) {
- t.Errorf("Git.UpdateRef returned %+v, want %+v", ref, want)
- }
-
- // without 'refs/' prefix
- _, _, err = client.Git.UpdateRef("o", "r", &Reference{
- Ref: String("heads/b"),
- Object: &GitObject{SHA: String("aa218f56b14c9653891f9e74264a383fa43fefbd")},
- }, true)
- if err != nil {
- t.Errorf("Git.UpdateRef returned error: %v", err)
- }
-}
-
-func TestGitService_DeleteRef(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/git/refs/heads/b", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "DELETE")
- })
-
- _, err := client.Git.DeleteRef("o", "r", "refs/heads/b")
- if err != nil {
- t.Errorf("Git.DeleteRef returned error: %v", err)
- }
-
- // without 'refs/' prefix
- if _, err := client.Git.DeleteRef("o", "r", "heads/b"); err != nil {
- t.Errorf("Git.DeleteRef returned error: %v", err)
- }
-}
diff --git a/vendor/src/github.com/google/go-github/github/git_tags.go b/vendor/src/github.com/google/go-github/github/git_tags.go
deleted file mode 100644
index 01b9cb2..0000000
--- a/vendor/src/github.com/google/go-github/github/git_tags.go
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "fmt"
-)
-
-// Tag represents a tag object.
-type Tag struct {
- Tag *string `json:"tag,omitempty"`
- SHA *string `json:"sha,omitempty"`
- URL *string `json:"url,omitempty"`
- Message *string `json:"message,omitempty"`
- Tagger *CommitAuthor `json:"tagger,omitempty"`
- Object *GitObject `json:"object,omitempty"`
- Verification *SignatureVerification `json:"verification,omitempty"`
-}
-
-// createTagRequest represents the body of a CreateTag request. This is mostly
-// identical to Tag with the exception that the object SHA and Type are
-// top-level fields, rather than being nested inside a JSON object.
-type createTagRequest struct {
- Tag *string `json:"tag,omitempty"`
- Message *string `json:"message,omitempty"`
- Object *string `json:"object,omitempty"`
- Type *string `json:"type,omitempty"`
- Tagger *CommitAuthor `json:"tagger,omitempty"`
-}
-
-// GetTag fetchs a tag from a repo given a SHA.
-//
-// GitHub API docs: http://developer.github.com/v3/git/tags/#get-a-tag
-func (s *GitService) GetTag(owner string, repo string, sha string) (*Tag, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/git/tags/%v", owner, repo, sha)
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeGitSigningPreview)
-
- tag := new(Tag)
- resp, err := s.client.Do(req, tag)
- return tag, resp, err
-}
-
-// CreateTag creates a tag object.
-//
-// GitHub API docs: http://developer.github.com/v3/git/tags/#create-a-tag-object
-func (s *GitService) CreateTag(owner string, repo string, tag *Tag) (*Tag, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/git/tags", owner, repo)
-
- // convert Tag into a createTagRequest
- tagRequest := &createTagRequest{
- Tag: tag.Tag,
- Message: tag.Message,
- Tagger: tag.Tagger,
- }
- if tag.Object != nil {
- tagRequest.Object = tag.Object.SHA
- tagRequest.Type = tag.Object.Type
- }
-
- req, err := s.client.NewRequest("POST", u, tagRequest)
- if err != nil {
- return nil, nil, err
- }
-
- t := new(Tag)
- resp, err := s.client.Do(req, t)
- return t, resp, err
-}
diff --git a/vendor/src/github.com/google/go-github/github/git_tags_test.go b/vendor/src/github.com/google/go-github/github/git_tags_test.go
deleted file mode 100644
index c44361d..0000000
--- a/vendor/src/github.com/google/go-github/github/git_tags_test.go
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "encoding/json"
- "fmt"
- "net/http"
- "reflect"
- "testing"
-)
-
-func TestGitService_GetTag(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/git/tags/s", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testHeader(t, r, "Accept", mediaTypeGitSigningPreview)
-
- fmt.Fprint(w, `{"tag": "t"}`)
- })
-
- tag, _, err := client.Git.GetTag("o", "r", "s")
- if err != nil {
- t.Errorf("Git.GetTag returned error: %v", err)
- }
-
- want := &Tag{Tag: String("t")}
- if !reflect.DeepEqual(tag, want) {
- t.Errorf("Git.GetTag returned %+v, want %+v", tag, want)
- }
-}
-
-func TestGitService_CreateTag(t *testing.T) {
- setup()
- defer teardown()
-
- input := &createTagRequest{Tag: String("t"), Object: String("s")}
-
- mux.HandleFunc("/repos/o/r/git/tags", func(w http.ResponseWriter, r *http.Request) {
- v := new(createTagRequest)
- json.NewDecoder(r.Body).Decode(v)
-
- testMethod(t, r, "POST")
- if !reflect.DeepEqual(v, input) {
- t.Errorf("Request body = %+v, want %+v", v, input)
- }
-
- fmt.Fprint(w, `{"tag": "t"}`)
- })
-
- tag, _, err := client.Git.CreateTag("o", "r", &Tag{
- Tag: input.Tag,
- Object: &GitObject{SHA: input.Object},
- })
- if err != nil {
- t.Errorf("Git.CreateTag returned error: %v", err)
- }
-
- want := &Tag{Tag: String("t")}
- if !reflect.DeepEqual(tag, want) {
- t.Errorf("Git.GetTag returned %+v, want %+v", tag, want)
- }
-}
diff --git a/vendor/src/github.com/google/go-github/github/git_trees.go b/vendor/src/github.com/google/go-github/github/git_trees.go
deleted file mode 100644
index 9efa4b3..0000000
--- a/vendor/src/github.com/google/go-github/github/git_trees.go
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import "fmt"
-
-// Tree represents a GitHub tree.
-type Tree struct {
- SHA *string `json:"sha,omitempty"`
- Entries []TreeEntry `json:"tree,omitempty"`
-}
-
-func (t Tree) String() string {
- return Stringify(t)
-}
-
-// TreeEntry represents the contents of a tree structure. TreeEntry can
-// represent either a blob, a commit (in the case of a submodule), or another
-// tree.
-type TreeEntry struct {
- SHA *string `json:"sha,omitempty"`
- Path *string `json:"path,omitempty"`
- Mode *string `json:"mode,omitempty"`
- Type *string `json:"type,omitempty"`
- Size *int `json:"size,omitempty"`
- Content *string `json:"content,omitempty"`
-}
-
-func (t TreeEntry) String() string {
- return Stringify(t)
-}
-
-// GetTree fetches the Tree object for a given sha hash from a repository.
-//
-// GitHub API docs: http://developer.github.com/v3/git/trees/#get-a-tree
-func (s *GitService) GetTree(owner string, repo string, sha string, recursive bool) (*Tree, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/git/trees/%v", owner, repo, sha)
- if recursive {
- u += "?recursive=1"
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- t := new(Tree)
- resp, err := s.client.Do(req, t)
- if err != nil {
- return nil, resp, err
- }
-
- return t, resp, err
-}
-
-// createTree represents the body of a CreateTree request.
-type createTree struct {
- BaseTree string `json:"base_tree,omitempty"`
- Entries []TreeEntry `json:"tree"`
-}
-
-// CreateTree creates a new tree in a repository. If both a tree and a nested
-// path modifying that tree are specified, it will overwrite the contents of
-// that tree with the new path contents and write a new tree out.
-//
-// GitHub API docs: http://developer.github.com/v3/git/trees/#create-a-tree
-func (s *GitService) CreateTree(owner string, repo string, baseTree string, entries []TreeEntry) (*Tree, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/git/trees", owner, repo)
-
- body := &createTree{
- BaseTree: baseTree,
- Entries: entries,
- }
- req, err := s.client.NewRequest("POST", u, body)
- if err != nil {
- return nil, nil, err
- }
-
- t := new(Tree)
- resp, err := s.client.Do(req, t)
- if err != nil {
- return nil, resp, err
- }
-
- return t, resp, err
-}
diff --git a/vendor/src/github.com/google/go-github/github/git_trees_test.go b/vendor/src/github.com/google/go-github/github/git_trees_test.go
deleted file mode 100644
index 99ec4f3..0000000
--- a/vendor/src/github.com/google/go-github/github/git_trees_test.go
+++ /dev/null
@@ -1,189 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "encoding/json"
- "fmt"
- "net/http"
- "reflect"
- "testing"
-)
-
-func TestGitService_GetTree(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/git/trees/s", func(w http.ResponseWriter, r *http.Request) {
- if m := "GET"; m != r.Method {
- t.Errorf("Request method = %v, want %v", r.Method, m)
- }
- fmt.Fprint(w, `{
- "sha": "s",
- "tree": [ { "type": "blob" } ]
- }`)
- })
-
- tree, _, err := client.Git.GetTree("o", "r", "s", true)
- if err != nil {
- t.Errorf("Git.GetTree returned error: %v", err)
- }
-
- want := Tree{
- SHA: String("s"),
- Entries: []TreeEntry{
- {
- Type: String("blob"),
- },
- },
- }
- if !reflect.DeepEqual(*tree, want) {
- t.Errorf("Tree.Get returned %+v, want %+v", *tree, want)
- }
-}
-
-func TestGitService_GetTree_invalidOwner(t *testing.T) {
- _, _, err := client.Git.GetTree("%", "%", "%", false)
- testURLParseError(t, err)
-}
-
-func TestGitService_CreateTree(t *testing.T) {
- setup()
- defer teardown()
-
- input := []TreeEntry{
- {
- Path: String("file.rb"),
- Mode: String("100644"),
- Type: String("blob"),
- SHA: String("7c258a9869f33c1e1e1f74fbb32f07c86cb5a75b"),
- },
- }
-
- mux.HandleFunc("/repos/o/r/git/trees", func(w http.ResponseWriter, r *http.Request) {
- v := new(createTree)
- json.NewDecoder(r.Body).Decode(v)
-
- if m := "POST"; m != r.Method {
- t.Errorf("Request method = %v, want %v", r.Method, m)
- }
-
- want := &createTree{
- BaseTree: "b",
- Entries: input,
- }
- if !reflect.DeepEqual(v, want) {
- t.Errorf("Git.CreateTree request body: %+v, want %+v", v, want)
- }
-
- fmt.Fprint(w, `{
- "sha": "cd8274d15fa3ae2ab983129fb037999f264ba9a7",
- "tree": [
- {
- "path": "file.rb",
- "mode": "100644",
- "type": "blob",
- "size": 132,
- "sha": "7c258a9869f33c1e1e1f74fbb32f07c86cb5a75b"
- }
- ]
- }`)
- })
-
- tree, _, err := client.Git.CreateTree("o", "r", "b", input)
- if err != nil {
- t.Errorf("Git.CreateTree returned error: %v", err)
- }
-
- want := Tree{
- String("cd8274d15fa3ae2ab983129fb037999f264ba9a7"),
- []TreeEntry{
- {
- Path: String("file.rb"),
- Mode: String("100644"),
- Type: String("blob"),
- Size: Int(132),
- SHA: String("7c258a9869f33c1e1e1f74fbb32f07c86cb5a75b"),
- },
- },
- }
-
- if !reflect.DeepEqual(*tree, want) {
- t.Errorf("Git.CreateTree returned %+v, want %+v", *tree, want)
- }
-}
-
-func TestGitService_CreateTree_Content(t *testing.T) {
- setup()
- defer teardown()
-
- input := []TreeEntry{
- {
- Path: String("content.md"),
- Mode: String("100644"),
- Content: String("file content"),
- },
- }
-
- mux.HandleFunc("/repos/o/r/git/trees", func(w http.ResponseWriter, r *http.Request) {
- v := new(createTree)
- json.NewDecoder(r.Body).Decode(v)
-
- if m := "POST"; m != r.Method {
- t.Errorf("Request method = %v, want %v", r.Method, m)
- }
-
- want := &createTree{
- BaseTree: "b",
- Entries: input,
- }
- if !reflect.DeepEqual(v, want) {
- t.Errorf("Git.CreateTree request body: %+v, want %+v", v, want)
- }
-
- fmt.Fprint(w, `{
- "sha": "5c6780ad2c68743383b740fd1dab6f6a33202b11",
- "url": "https://api.github.com/repos/o/r/git/trees/5c6780ad2c68743383b740fd1dab6f6a33202b11",
- "tree": [
- {
- "mode": "100644",
- "type": "blob",
- "sha": "aad8feacf6f8063150476a7b2bd9770f2794c08b",
- "path": "content.md",
- "size": 12,
- "url": "https://api.github.com/repos/o/r/git/blobs/aad8feacf6f8063150476a7b2bd9770f2794c08b"
- }
- ]
- }`)
- })
-
- tree, _, err := client.Git.CreateTree("o", "r", "b", input)
- if err != nil {
- t.Errorf("Git.CreateTree returned error: %v", err)
- }
-
- want := Tree{
- String("5c6780ad2c68743383b740fd1dab6f6a33202b11"),
- []TreeEntry{
- {
- Path: String("content.md"),
- Mode: String("100644"),
- Type: String("blob"),
- Size: Int(12),
- SHA: String("aad8feacf6f8063150476a7b2bd9770f2794c08b"),
- },
- },
- }
-
- if !reflect.DeepEqual(*tree, want) {
- t.Errorf("Git.CreateTree returned %+v, want %+v", *tree, want)
- }
-}
-
-func TestGitService_CreateTree_invalidOwner(t *testing.T) {
- _, _, err := client.Git.CreateTree("%", "%", "", nil)
- testURLParseError(t, err)
-}
diff --git a/vendor/src/github.com/google/go-github/github/github.go b/vendor/src/github.com/google/go-github/github/github.go
deleted file mode 100644
index 591bc54..0000000
--- a/vendor/src/github.com/google/go-github/github/github.go
+++ /dev/null
@@ -1,807 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "bytes"
- "encoding/json"
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "net/http"
- "net/url"
- "reflect"
- "strconv"
- "strings"
- "sync"
- "time"
-
- "github.com/google/go-querystring/query"
-)
-
-const (
- // StatusUnprocessableEntity is the status code returned when sending a request with invalid fields.
- StatusUnprocessableEntity = 422
-)
-
-const (
- libraryVersion = "2"
- defaultBaseURL = "https://api.github.com/"
- uploadBaseURL = "https://uploads.github.com/"
- userAgent = "go-github/" + libraryVersion
-
- headerRateLimit = "X-RateLimit-Limit"
- headerRateRemaining = "X-RateLimit-Remaining"
- headerRateReset = "X-RateLimit-Reset"
- headerOTP = "X-GitHub-OTP"
-
- mediaTypeV3 = "application/vnd.github.v3+json"
- defaultMediaType = "application/octet-stream"
- mediaTypeV3SHA = "application/vnd.github.v3.sha"
- mediaTypeOrgPermissionRepo = "application/vnd.github.v3.repository+json"
-
- // Media Type values to access preview APIs
-
- // https://developer.github.com/changes/2015-03-09-licenses-api/
- mediaTypeLicensesPreview = "application/vnd.github.drax-preview+json"
-
- // https://developer.github.com/changes/2014-12-09-new-attributes-for-stars-api/
- mediaTypeStarringPreview = "application/vnd.github.v3.star+json"
-
- // https://developer.github.com/changes/2015-11-11-protected-branches-api/
- mediaTypeProtectedBranchesPreview = "application/vnd.github.loki-preview+json"
-
- // https://help.github.com/enterprise/2.4/admin/guides/migrations/exporting-the-github-com-organization-s-repositories/
- mediaTypeMigrationsPreview = "application/vnd.github.wyandotte-preview+json"
-
- // https://developer.github.com/changes/2016-04-06-deployment-and-deployment-status-enhancements/
- mediaTypeDeploymentStatusPreview = "application/vnd.github.ant-man-preview+json"
-
- // https://developer.github.com/changes/2016-02-19-source-import-preview-api/
- mediaTypeImportPreview = "application/vnd.github.barred-rock-preview"
-
- // https://developer.github.com/changes/2016-05-12-reactions-api-preview/
- mediaTypeReactionsPreview = "application/vnd.github.squirrel-girl-preview"
-
- // https://developer.github.com/changes/2016-04-01-squash-api-preview/
- mediaTypeSquashPreview = "application/vnd.github.polaris-preview+json"
-
- // https://developer.github.com/changes/2016-04-04-git-signing-api-preview/
- mediaTypeGitSigningPreview = "application/vnd.github.cryptographer-preview+json"
-
- // https://developer.github.com/changes/2016-05-23-timeline-preview-api/
- mediaTypeTimelinePreview = "application/vnd.github.mockingbird-preview+json"
-
- // https://developer.github.com/changes/2016-06-14-repository-invitations/
- mediaTypeRepositoryInvitationsPreview = "application/vnd.github.swamp-thing-preview+json"
-
- // https://developer.github.com/changes/2016-04-21-oauth-authorizations-grants-api-preview/
- mediaTypeOAuthGrantAuthorizationsPreview = "application/vnd.github.damage-preview+json"
-
- // https://developer.github.com/changes/2016-07-06-github-pages-preiew-api/
- mediaTypePagesPreview = "application/vnd.github.mister-fantastic-preview+json"
-)
-
-// A Client manages communication with the GitHub API.
-type Client struct {
- clientMu sync.Mutex // clientMu protects the client during calls that modify the CheckRedirect func.
- client *http.Client // HTTP client used to communicate with the API.
-
- // Base URL for API requests. Defaults to the public GitHub API, but can be
- // set to a domain endpoint to use with GitHub Enterprise. BaseURL should
- // always be specified with a trailing slash.
- BaseURL *url.URL
-
- // Base URL for uploading files.
- UploadURL *url.URL
-
- // User agent used when communicating with the GitHub API.
- UserAgent string
-
- rateMu sync.Mutex
- rateLimits [categories]Rate // Rate limits for the client as determined by the most recent API calls.
- mostRecent rateLimitCategory
-
- common service // Reuse a single struct instead of allocating one for each service on the heap.
-
- // Services used for talking to different parts of the GitHub API.
- Activity *ActivityService
- Authorizations *AuthorizationsService
- Gists *GistsService
- Git *GitService
- Gitignores *GitignoresService
- Issues *IssuesService
- Organizations *OrganizationsService
- PullRequests *PullRequestsService
- Repositories *RepositoriesService
- Search *SearchService
- Users *UsersService
- Licenses *LicensesService
- Migrations *MigrationService
- Reactions *ReactionsService
-}
-
-type service struct {
- client *Client
-}
-
-// ListOptions specifies the optional parameters to various List methods that
-// support pagination.
-type ListOptions struct {
- // For paginated result sets, page of results to retrieve.
- Page int `url:"page,omitempty"`
-
- // For paginated result sets, the number of results to include per page.
- PerPage int `url:"per_page,omitempty"`
-}
-
-// UploadOptions specifies the parameters to methods that support uploads.
-type UploadOptions struct {
- Name string `url:"name,omitempty"`
-}
-
-// addOptions adds the parameters in opt as URL query parameters to s. opt
-// must be a struct whose fields may contain "url" tags.
-func addOptions(s string, opt interface{}) (string, error) {
- v := reflect.ValueOf(opt)
- if v.Kind() == reflect.Ptr && v.IsNil() {
- return s, nil
- }
-
- u, err := url.Parse(s)
- if err != nil {
- return s, err
- }
-
- qs, err := query.Values(opt)
- if err != nil {
- return s, err
- }
-
- u.RawQuery = qs.Encode()
- return u.String(), nil
-}
-
-// NewClient returns a new GitHub API client. If a nil httpClient is
-// provided, http.DefaultClient will be used. To use API methods which require
-// authentication, provide an http.Client that will perform the authentication
-// for you (such as that provided by the golang.org/x/oauth2 library).
-func NewClient(httpClient *http.Client) *Client {
- if httpClient == nil {
- httpClient = http.DefaultClient
- }
- baseURL, _ := url.Parse(defaultBaseURL)
- uploadURL, _ := url.Parse(uploadBaseURL)
-
- c := &Client{client: httpClient, BaseURL: baseURL, UserAgent: userAgent, UploadURL: uploadURL}
- c.common.client = c
- c.Activity = (*ActivityService)(&c.common)
- c.Authorizations = (*AuthorizationsService)(&c.common)
- c.Gists = (*GistsService)(&c.common)
- c.Git = (*GitService)(&c.common)
- c.Gitignores = (*GitignoresService)(&c.common)
- c.Issues = (*IssuesService)(&c.common)
- c.Licenses = (*LicensesService)(&c.common)
- c.Migrations = (*MigrationService)(&c.common)
- c.Organizations = (*OrganizationsService)(&c.common)
- c.PullRequests = (*PullRequestsService)(&c.common)
- c.Reactions = (*ReactionsService)(&c.common)
- c.Repositories = (*RepositoriesService)(&c.common)
- c.Search = (*SearchService)(&c.common)
- c.Users = (*UsersService)(&c.common)
- return c
-}
-
-// NewRequest creates an API request. A relative URL can be provided in urlStr,
-// in which case it is resolved relative to the BaseURL of the Client.
-// Relative URLs should always be specified without a preceding slash. If
-// specified, the value pointed to by body is JSON encoded and included as the
-// request body.
-func (c *Client) NewRequest(method, urlStr string, body interface{}) (*http.Request, error) {
- rel, err := url.Parse(urlStr)
- if err != nil {
- return nil, err
- }
-
- u := c.BaseURL.ResolveReference(rel)
-
- var buf io.ReadWriter
- if body != nil {
- buf = new(bytes.Buffer)
- err := json.NewEncoder(buf).Encode(body)
- if err != nil {
- return nil, err
- }
- }
-
- req, err := http.NewRequest(method, u.String(), buf)
- if err != nil {
- return nil, err
- }
-
- if body != nil {
- req.Header.Set("Content-Type", "application/json")
- }
- req.Header.Set("Accept", mediaTypeV3)
- if c.UserAgent != "" {
- req.Header.Set("User-Agent", c.UserAgent)
- }
- return req, nil
-}
-
-// NewUploadRequest creates an upload request. A relative URL can be provided in
-// urlStr, in which case it is resolved relative to the UploadURL of the Client.
-// Relative URLs should always be specified without a preceding slash.
-func (c *Client) NewUploadRequest(urlStr string, reader io.Reader, size int64, mediaType string) (*http.Request, error) {
- rel, err := url.Parse(urlStr)
- if err != nil {
- return nil, err
- }
-
- u := c.UploadURL.ResolveReference(rel)
- req, err := http.NewRequest("POST", u.String(), reader)
- if err != nil {
- return nil, err
- }
- req.ContentLength = size
-
- if mediaType == "" {
- mediaType = defaultMediaType
- }
- req.Header.Set("Content-Type", mediaType)
- req.Header.Set("Accept", mediaTypeV3)
- req.Header.Set("User-Agent", c.UserAgent)
- return req, nil
-}
-
-// Response is a GitHub API response. This wraps the standard http.Response
-// returned from GitHub and provides convenient access to things like
-// pagination links.
-type Response struct {
- *http.Response
-
- // These fields provide the page values for paginating through a set of
- // results. Any or all of these may be set to the zero value for
- // responses that are not part of a paginated set, or for which there
- // are no additional pages.
-
- NextPage int
- PrevPage int
- FirstPage int
- LastPage int
-
- Rate
-}
-
-// newResponse creates a new Response for the provided http.Response.
-func newResponse(r *http.Response) *Response {
- response := &Response{Response: r}
- response.populatePageValues()
- response.Rate = parseRate(r)
- return response
-}
-
-// populatePageValues parses the HTTP Link response headers and populates the
-// various pagination link values in the Response.
-func (r *Response) populatePageValues() {
- if links, ok := r.Response.Header["Link"]; ok && len(links) > 0 {
- for _, link := range strings.Split(links[0], ",") {
- segments := strings.Split(strings.TrimSpace(link), ";")
-
- // link must at least have href and rel
- if len(segments) < 2 {
- continue
- }
-
- // ensure href is properly formatted
- if !strings.HasPrefix(segments[0], "<") || !strings.HasSuffix(segments[0], ">") {
- continue
- }
-
- // try to pull out page parameter
- url, err := url.Parse(segments[0][1 : len(segments[0])-1])
- if err != nil {
- continue
- }
- page := url.Query().Get("page")
- if page == "" {
- continue
- }
-
- for _, segment := range segments[1:] {
- switch strings.TrimSpace(segment) {
- case `rel="next"`:
- r.NextPage, _ = strconv.Atoi(page)
- case `rel="prev"`:
- r.PrevPage, _ = strconv.Atoi(page)
- case `rel="first"`:
- r.FirstPage, _ = strconv.Atoi(page)
- case `rel="last"`:
- r.LastPage, _ = strconv.Atoi(page)
- }
-
- }
- }
- }
-}
-
-// parseRate parses the rate related headers.
-func parseRate(r *http.Response) Rate {
- var rate Rate
- if limit := r.Header.Get(headerRateLimit); limit != "" {
- rate.Limit, _ = strconv.Atoi(limit)
- }
- if remaining := r.Header.Get(headerRateRemaining); remaining != "" {
- rate.Remaining, _ = strconv.Atoi(remaining)
- }
- if reset := r.Header.Get(headerRateReset); reset != "" {
- if v, _ := strconv.ParseInt(reset, 10, 64); v != 0 {
- rate.Reset = Timestamp{time.Unix(v, 0)}
- }
- }
- return rate
-}
-
-// Rate specifies the current rate limit for the client as determined by the
-// most recent API call. If the client is used in a multi-user application,
-// this rate may not always be up-to-date.
-//
-// Deprecated: Use the Response.Rate returned from most recent API call instead.
-// Call RateLimits() to check the current rate.
-func (c *Client) Rate() Rate {
- c.rateMu.Lock()
- rate := c.rateLimits[c.mostRecent]
- c.rateMu.Unlock()
- return rate
-}
-
-// Do sends an API request and returns the API response. The API response is
-// JSON decoded and stored in the value pointed to by v, or returned as an
-// error if an API error has occurred. If v implements the io.Writer
-// interface, the raw response body will be written to v, without attempting to
-// first decode it. If rate limit is exceeded and reset time is in the future,
-// Do returns *RateLimitError immediately without making a network API call.
-func (c *Client) Do(req *http.Request, v interface{}) (*Response, error) {
- rateLimitCategory := category(req.URL.Path)
-
- // If we've hit rate limit, don't make further requests before Reset time.
- if err := c.checkRateLimitBeforeDo(req, rateLimitCategory); err != nil {
- return nil, err
- }
-
- resp, err := c.client.Do(req)
- if err != nil {
- return nil, err
- }
-
- defer func() {
- // Drain up to 512 bytes and close the body to let the Transport reuse the connection
- io.CopyN(ioutil.Discard, resp.Body, 512)
- resp.Body.Close()
- }()
-
- response := newResponse(resp)
-
- c.rateMu.Lock()
- c.rateLimits[rateLimitCategory] = response.Rate
- c.mostRecent = rateLimitCategory
- c.rateMu.Unlock()
-
- err = CheckResponse(resp)
- if err != nil {
- // even though there was an error, we still return the response
- // in case the caller wants to inspect it further
- return response, err
- }
-
- if v != nil {
- if w, ok := v.(io.Writer); ok {
- io.Copy(w, resp.Body)
- } else {
- err = json.NewDecoder(resp.Body).Decode(v)
- if err == io.EOF {
- err = nil // ignore EOF errors caused by empty response body
- }
- }
- }
-
- return response, err
-}
-
-// checkRateLimitBeforeDo does not make any network calls, but uses existing knowledge from
-// current client state in order to quickly check if *RateLimitError can be immediately returned
-// from Client.Do, and if so, returns it so that Client.Do can skip making a network API call unneccessarily.
-// Otherwise it returns nil, and Client.Do should proceed normally.
-func (c *Client) checkRateLimitBeforeDo(req *http.Request, rateLimitCategory rateLimitCategory) error {
- c.rateMu.Lock()
- rate := c.rateLimits[rateLimitCategory]
- c.rateMu.Unlock()
- if !rate.Reset.Time.IsZero() && rate.Remaining == 0 && time.Now().Before(rate.Reset.Time) {
- // Create a fake response.
- resp := &http.Response{
- Status: http.StatusText(http.StatusForbidden),
- StatusCode: http.StatusForbidden,
- Request: req,
- Header: make(http.Header),
- Body: ioutil.NopCloser(strings.NewReader("")),
- }
- return &RateLimitError{
- Rate: rate,
- Response: resp,
- Message: fmt.Sprintf("API rate limit of %v still exceeded until %v, not making remote request.", rate.Limit, rate.Reset.Time),
- }
- }
-
- return nil
-}
-
-/*
-An ErrorResponse reports one or more errors caused by an API request.
-
-GitHub API docs: http://developer.github.com/v3/#client-errors
-*/
-type ErrorResponse struct {
- Response *http.Response // HTTP response that caused this error
- Message string `json:"message"` // error message
- Errors []Error `json:"errors"` // more detail on individual errors
- // Block is only populated on certain types of errors such as code 451.
- // See https://developer.github.com/changes/2016-03-17-the-451-status-code-is-now-supported/
- // for more information.
- Block *struct {
- Reason string `json:"reason,omitempty"`
- CreatedAt *Timestamp `json:"created_at,omitempty"`
- } `json:"block,omitempty"`
- // Most errors will also include a documentation_url field pointing
- // to some content that might help you resolve the error, see
- // https://developer.github.com/v3/#client-errors
- DocumentationURL string `json:"documentation_url,omitempty"`
-}
-
-func (r *ErrorResponse) Error() string {
- return fmt.Sprintf("%v %v: %d %v %+v",
- r.Response.Request.Method, sanitizeURL(r.Response.Request.URL),
- r.Response.StatusCode, r.Message, r.Errors)
-}
-
-// TwoFactorAuthError occurs when using HTTP Basic Authentication for a user
-// that has two-factor authentication enabled. The request can be reattempted
-// by providing a one-time password in the request.
-type TwoFactorAuthError ErrorResponse
-
-func (r *TwoFactorAuthError) Error() string { return (*ErrorResponse)(r).Error() }
-
-// RateLimitError occurs when GitHub returns 403 Forbidden response with a rate limit
-// remaining value of 0, and error message starts with "API rate limit exceeded for ".
-type RateLimitError struct {
- Rate Rate // Rate specifies last known rate limit for the client
- Response *http.Response // HTTP response that caused this error
- Message string `json:"message"` // error message
-}
-
-func (r *RateLimitError) Error() string {
- return fmt.Sprintf("%v %v: %d %v; rate reset in %v",
- r.Response.Request.Method, sanitizeURL(r.Response.Request.URL),
- r.Response.StatusCode, r.Message, r.Rate.Reset.Time.Sub(time.Now()))
-}
-
-// sanitizeURL redacts the client_secret parameter from the URL which may be
-// exposed to the user, specifically in the ErrorResponse error message.
-func sanitizeURL(uri *url.URL) *url.URL {
- if uri == nil {
- return nil
- }
- params := uri.Query()
- if len(params.Get("client_secret")) > 0 {
- params.Set("client_secret", "REDACTED")
- uri.RawQuery = params.Encode()
- }
- return uri
-}
-
-/*
-An Error reports more details on an individual error in an ErrorResponse.
-These are the possible validation error codes:
-
- missing:
- resource does not exist
- missing_field:
- a required field on a resource has not been set
- invalid:
- the formatting of a field is invalid
- already_exists:
- another resource has the same valid as this field
- custom:
- some resources return this (e.g. github.User.CreateKey()), additional
- information is set in the Message field of the Error
-
-GitHub API docs: http://developer.github.com/v3/#client-errors
-*/
-type Error struct {
- Resource string `json:"resource"` // resource on which the error occurred
- Field string `json:"field"` // field on which the error occurred
- Code string `json:"code"` // validation error code
- Message string `json:"message"` // Message describing the error. Errors with Code == "custom" will always have this set.
-}
-
-func (e *Error) Error() string {
- return fmt.Sprintf("%v error caused by %v field on %v resource",
- e.Code, e.Field, e.Resource)
-}
-
-// CheckResponse checks the API response for errors, and returns them if
-// present. A response is considered an error if it has a status code outside
-// the 200 range. API error responses are expected to have either no response
-// body, or a JSON response body that maps to ErrorResponse. Any other
-// response body will be silently ignored.
-//
-// The error type will be *RateLimitError for rate limit exceeded errors,
-// and *TwoFactorAuthError for two-factor authentication errors.
-func CheckResponse(r *http.Response) error {
- if c := r.StatusCode; 200 <= c && c <= 299 {
- return nil
- }
- errorResponse := &ErrorResponse{Response: r}
- data, err := ioutil.ReadAll(r.Body)
- if err == nil && data != nil {
- json.Unmarshal(data, errorResponse)
- }
- switch {
- case r.StatusCode == http.StatusUnauthorized && strings.HasPrefix(r.Header.Get(headerOTP), "required"):
- return (*TwoFactorAuthError)(errorResponse)
- case r.StatusCode == http.StatusForbidden && r.Header.Get(headerRateRemaining) == "0" && strings.HasPrefix(errorResponse.Message, "API rate limit exceeded for "):
- return &RateLimitError{
- Rate: parseRate(r),
- Response: errorResponse.Response,
- Message: errorResponse.Message,
- }
- default:
- return errorResponse
- }
-}
-
-// parseBoolResponse determines the boolean result from a GitHub API response.
-// Several GitHub API methods return boolean responses indicated by the HTTP
-// status code in the response (true indicated by a 204, false indicated by a
-// 404). This helper function will determine that result and hide the 404
-// error if present. Any other error will be returned through as-is.
-func parseBoolResponse(err error) (bool, error) {
- if err == nil {
- return true, nil
- }
-
- if err, ok := err.(*ErrorResponse); ok && err.Response.StatusCode == http.StatusNotFound {
- // Simply false. In this one case, we do not pass the error through.
- return false, nil
- }
-
- // some other real error occurred
- return false, err
-}
-
-// Rate represents the rate limit for the current client.
-type Rate struct {
- // The number of requests per hour the client is currently limited to.
- Limit int `json:"limit"`
-
- // The number of remaining requests the client can make this hour.
- Remaining int `json:"remaining"`
-
- // The time at which the current rate limit will reset.
- Reset Timestamp `json:"reset"`
-}
-
-func (r Rate) String() string {
- return Stringify(r)
-}
-
-// RateLimits represents the rate limits for the current client.
-type RateLimits struct {
- // The rate limit for non-search API requests. Unauthenticated
- // requests are limited to 60 per hour. Authenticated requests are
- // limited to 5,000 per hour.
- //
- // GitHub API docs: https://developer.github.com/v3/#rate-limiting
- Core *Rate `json:"core"`
-
- // The rate limit for search API requests. Unauthenticated requests
- // are limited to 5 requests per minutes. Authenticated requests are
- // limited to 20 per minute.
- //
- // GitHub API docs: https://developer.github.com/v3/search/#rate-limit
- Search *Rate `json:"search"`
-}
-
-func (r RateLimits) String() string {
- return Stringify(r)
-}
-
-type rateLimitCategory uint8
-
-const (
- coreCategory rateLimitCategory = iota
- searchCategory
-
- categories // An array of this length will be able to contain all rate limit categories.
-)
-
-// category returns the rate limit category of the endpoint, determined by Request.URL.Path.
-func category(path string) rateLimitCategory {
- switch {
- default:
- return coreCategory
- case strings.HasPrefix(path, "/search/"):
- return searchCategory
- }
-}
-
-// Deprecated: RateLimit is deprecated, use RateLimits instead.
-func (c *Client) RateLimit() (*Rate, *Response, error) {
- limits, resp, err := c.RateLimits()
- if limits == nil {
- return nil, nil, err
- }
-
- return limits.Core, resp, err
-}
-
-// RateLimits returns the rate limits for the current client.
-func (c *Client) RateLimits() (*RateLimits, *Response, error) {
- req, err := c.NewRequest("GET", "rate_limit", nil)
- if err != nil {
- return nil, nil, err
- }
-
- response := new(struct {
- Resources *RateLimits `json:"resources"`
- })
- resp, err := c.Do(req, response)
- if err != nil {
- return nil, nil, err
- }
-
- if response.Resources != nil {
- c.rateMu.Lock()
- if response.Resources.Core != nil {
- c.rateLimits[coreCategory] = *response.Resources.Core
- }
- if response.Resources.Search != nil {
- c.rateLimits[searchCategory] = *response.Resources.Search
- }
- c.rateMu.Unlock()
- }
-
- return response.Resources, resp, err
-}
-
-/*
-UnauthenticatedRateLimitedTransport allows you to make unauthenticated calls
-that need to use a higher rate limit associated with your OAuth application.
-
- t := &github.UnauthenticatedRateLimitedTransport{
- ClientID: "your app's client ID",
- ClientSecret: "your app's client secret",
- }
- client := github.NewClient(t.Client())
-
-This will append the querystring params client_id=xxx&client_secret=yyy to all
-requests.
-
-See http://developer.github.com/v3/#unauthenticated-rate-limited-requests for
-more information.
-*/
-type UnauthenticatedRateLimitedTransport struct {
- // ClientID is the GitHub OAuth client ID of the current application, which
- // can be found by selecting its entry in the list at
- // https://github.com/settings/applications.
- ClientID string
-
- // ClientSecret is the GitHub OAuth client secret of the current
- // application.
- ClientSecret string
-
- // Transport is the underlying HTTP transport to use when making requests.
- // It will default to http.DefaultTransport if nil.
- Transport http.RoundTripper
-}
-
-// RoundTrip implements the RoundTripper interface.
-func (t *UnauthenticatedRateLimitedTransport) RoundTrip(req *http.Request) (*http.Response, error) {
- if t.ClientID == "" {
- return nil, errors.New("t.ClientID is empty")
- }
- if t.ClientSecret == "" {
- return nil, errors.New("t.ClientSecret is empty")
- }
-
- // To set extra querystring params, we must make a copy of the Request so
- // that we don't modify the Request we were given. This is required by the
- // specification of http.RoundTripper.
- req = cloneRequest(req)
- q := req.URL.Query()
- q.Set("client_id", t.ClientID)
- q.Set("client_secret", t.ClientSecret)
- req.URL.RawQuery = q.Encode()
-
- // Make the HTTP request.
- return t.transport().RoundTrip(req)
-}
-
-// Client returns an *http.Client that makes requests which are subject to the
-// rate limit of your OAuth application.
-func (t *UnauthenticatedRateLimitedTransport) Client() *http.Client {
- return &http.Client{Transport: t}
-}
-
-func (t *UnauthenticatedRateLimitedTransport) transport() http.RoundTripper {
- if t.Transport != nil {
- return t.Transport
- }
- return http.DefaultTransport
-}
-
-// BasicAuthTransport is an http.RoundTripper that authenticates all requests
-// using HTTP Basic Authentication with the provided username and password. It
-// additionally supports users who have two-factor authentication enabled on
-// their GitHub account.
-type BasicAuthTransport struct {
- Username string // GitHub username
- Password string // GitHub password
- OTP string // one-time password for users with two-factor auth enabled
-
- // Transport is the underlying HTTP transport to use when making requests.
- // It will default to http.DefaultTransport if nil.
- Transport http.RoundTripper
-}
-
-// RoundTrip implements the RoundTripper interface.
-func (t *BasicAuthTransport) RoundTrip(req *http.Request) (*http.Response, error) {
- req = cloneRequest(req) // per RoundTrip contract
- req.SetBasicAuth(t.Username, t.Password)
- if t.OTP != "" {
- req.Header.Set(headerOTP, t.OTP)
- }
- return t.transport().RoundTrip(req)
-}
-
-// Client returns an *http.Client that makes requests that are authenticated
-// using HTTP Basic Authentication.
-func (t *BasicAuthTransport) Client() *http.Client {
- return &http.Client{Transport: t}
-}
-
-func (t *BasicAuthTransport) transport() http.RoundTripper {
- if t.Transport != nil {
- return t.Transport
- }
- return http.DefaultTransport
-}
-
-// cloneRequest returns a clone of the provided *http.Request. The clone is a
-// shallow copy of the struct and its Header map.
-func cloneRequest(r *http.Request) *http.Request {
- // shallow copy of the struct
- r2 := new(http.Request)
- *r2 = *r
- // deep copy of the Header
- r2.Header = make(http.Header, len(r.Header))
- for k, s := range r.Header {
- r2.Header[k] = append([]string(nil), s...)
- }
- return r2
-}
-
-// Bool is a helper routine that allocates a new bool value
-// to store v and returns a pointer to it.
-func Bool(v bool) *bool { return &v }
-
-// Int is a helper routine that allocates a new int value
-// to store v and returns a pointer to it.
-func Int(v int) *int { return &v }
-
-// String is a helper routine that allocates a new string value
-// to store v and returns a pointer to it.
-func String(v string) *string { return &v }
diff --git a/vendor/src/github.com/google/go-github/github/github_test.go b/vendor/src/github.com/google/go-github/github/github_test.go
deleted file mode 100644
index e166bb0..0000000
--- a/vendor/src/github.com/google/go-github/github/github_test.go
+++ /dev/null
@@ -1,865 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "bytes"
- "encoding/json"
- "fmt"
- "io/ioutil"
- "net/http"
- "net/http/httptest"
- "net/url"
- "os"
- "path"
- "reflect"
- "strings"
- "testing"
- "time"
-)
-
-var (
- // mux is the HTTP request multiplexer used with the test server.
- mux *http.ServeMux
-
- // client is the GitHub client being tested.
- client *Client
-
- // server is a test HTTP server used to provide mock API responses.
- server *httptest.Server
-)
-
-// setup sets up a test HTTP server along with a github.Client that is
-// configured to talk to that test server. Tests should register handlers on
-// mux which provide mock responses for the API method being tested.
-func setup() {
- // test server
- mux = http.NewServeMux()
- server = httptest.NewServer(mux)
-
- // github client configured to use test server
- client = NewClient(nil)
- url, _ := url.Parse(server.URL)
- client.BaseURL = url
- client.UploadURL = url
-}
-
-// teardown closes the test HTTP server.
-func teardown() {
- server.Close()
-}
-
-// openTestFile creates a new file with the given name and content for testing.
-// In order to ensure the exact file name, this function will create a new temp
-// directory, and create the file in that directory. It is the caller's
-// responsibility to remove the directory and its contents when no longer needed.
-func openTestFile(name, content string) (file *os.File, dir string, err error) {
- dir, err = ioutil.TempDir("", "go-github")
- if err != nil {
- return nil, dir, err
- }
-
- file, err = os.OpenFile(path.Join(dir, name), os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
- if err != nil {
- return nil, dir, err
- }
-
- fmt.Fprint(file, content)
-
- // close and re-open the file to keep file.Stat() happy
- file.Close()
- file, err = os.Open(file.Name())
- if err != nil {
- return nil, dir, err
- }
-
- return file, dir, err
-}
-
-func testMethod(t *testing.T, r *http.Request, want string) {
- if got := r.Method; got != want {
- t.Errorf("Request method: %v, want %v", got, want)
- }
-}
-
-type values map[string]string
-
-func testFormValues(t *testing.T, r *http.Request, values values) {
- want := url.Values{}
- for k, v := range values {
- want.Add(k, v)
- }
-
- r.ParseForm()
- if got := r.Form; !reflect.DeepEqual(got, want) {
- t.Errorf("Request parameters: %v, want %v", got, want)
- }
-}
-
-func testHeader(t *testing.T, r *http.Request, header string, want string) {
- if got := r.Header.Get(header); got != want {
- t.Errorf("Header.Get(%q) returned %q, want %q", header, got, want)
- }
-}
-
-func testURLParseError(t *testing.T, err error) {
- if err == nil {
- t.Errorf("Expected error to be returned")
- }
- if err, ok := err.(*url.Error); !ok || err.Op != "parse" {
- t.Errorf("Expected URL parse error, got %+v", err)
- }
-}
-
-func testBody(t *testing.T, r *http.Request, want string) {
- b, err := ioutil.ReadAll(r.Body)
- if err != nil {
- t.Errorf("Error reading request body: %v", err)
- }
- if got := string(b); got != want {
- t.Errorf("request Body is %s, want %s", got, want)
- }
-}
-
-// Helper function to test that a value is marshalled to JSON as expected.
-func testJSONMarshal(t *testing.T, v interface{}, want string) {
- j, err := json.Marshal(v)
- if err != nil {
- t.Errorf("Unable to marshal JSON for %v", v)
- }
-
- w := new(bytes.Buffer)
- err = json.Compact(w, []byte(want))
- if err != nil {
- t.Errorf("String is not valid json: %s", want)
- }
-
- if w.String() != string(j) {
- t.Errorf("json.Marshal(%q) returned %s, want %s", v, j, w)
- }
-
- // now go the other direction and make sure things unmarshal as expected
- u := reflect.ValueOf(v).Interface()
- if err := json.Unmarshal([]byte(want), u); err != nil {
- t.Errorf("Unable to unmarshal JSON for %v", want)
- }
-
- if !reflect.DeepEqual(v, u) {
- t.Errorf("json.Unmarshal(%q) returned %s, want %s", want, u, v)
- }
-}
-
-func TestNewClient(t *testing.T) {
- c := NewClient(nil)
-
- if got, want := c.BaseURL.String(), defaultBaseURL; got != want {
- t.Errorf("NewClient BaseURL is %v, want %v", got, want)
- }
- if got, want := c.UserAgent, userAgent; got != want {
- t.Errorf("NewClient UserAgent is %v, want %v", got, want)
- }
-}
-
-// Ensure that length of Client.rateLimits is the same as number of fields in RateLimits struct.
-func TestClient_rateLimits(t *testing.T) {
- if got, want := len(Client{}.rateLimits), reflect.TypeOf(RateLimits{}).NumField(); got != want {
- t.Errorf("len(Client{}.rateLimits) is %v, want %v", got, want)
- }
-}
-
-func TestNewRequest(t *testing.T) {
- c := NewClient(nil)
-
- inURL, outURL := "/foo", defaultBaseURL+"foo"
- inBody, outBody := &User{Login: String("l")}, `{"login":"l"}`+"\n"
- req, _ := c.NewRequest("GET", inURL, inBody)
-
- // test that relative URL was expanded
- if got, want := req.URL.String(), outURL; got != want {
- t.Errorf("NewRequest(%q) URL is %v, want %v", inURL, got, want)
- }
-
- // test that body was JSON encoded
- body, _ := ioutil.ReadAll(req.Body)
- if got, want := string(body), outBody; got != want {
- t.Errorf("NewRequest(%q) Body is %v, want %v", inBody, got, want)
- }
-
- // test that default user-agent is attached to the request
- if got, want := req.Header.Get("User-Agent"), c.UserAgent; got != want {
- t.Errorf("NewRequest() User-Agent is %v, want %v", got, want)
- }
-}
-
-func TestNewRequest_invalidJSON(t *testing.T) {
- c := NewClient(nil)
-
- type T struct {
- A map[interface{}]interface{}
- }
- _, err := c.NewRequest("GET", "/", &T{})
-
- if err == nil {
- t.Error("Expected error to be returned.")
- }
- if err, ok := err.(*json.UnsupportedTypeError); !ok {
- t.Errorf("Expected a JSON error; got %#v.", err)
- }
-}
-
-func TestNewRequest_badURL(t *testing.T) {
- c := NewClient(nil)
- _, err := c.NewRequest("GET", ":", nil)
- testURLParseError(t, err)
-}
-
-// ensure that no User-Agent header is set if the client's UserAgent is empty.
-// This caused a problem with Google's internal http client.
-func TestNewRequest_emptyUserAgent(t *testing.T) {
- c := NewClient(nil)
- c.UserAgent = ""
- req, err := c.NewRequest("GET", "/", nil)
- if err != nil {
- t.Fatalf("NewRequest returned unexpected error: %v", err)
- }
- if _, ok := req.Header["User-Agent"]; ok {
- t.Fatal("constructed request contains unexpected User-Agent header")
- }
-}
-
-// If a nil body is passed to github.NewRequest, make sure that nil is also
-// passed to http.NewRequest. In most cases, passing an io.Reader that returns
-// no content is fine, since there is no difference between an HTTP request
-// body that is an empty string versus one that is not set at all. However in
-// certain cases, intermediate systems may treat these differently resulting in
-// subtle errors.
-func TestNewRequest_emptyBody(t *testing.T) {
- c := NewClient(nil)
- req, err := c.NewRequest("GET", "/", nil)
- if err != nil {
- t.Fatalf("NewRequest returned unexpected error: %v", err)
- }
- if req.Body != nil {
- t.Fatalf("constructed request contains a non-nil Body")
- }
-}
-
-func TestResponse_populatePageValues(t *testing.T) {
- r := http.Response{
- Header: http.Header{
- "Link": {`; rel="first",` +
- ` ; rel="prev",` +
- ` ; rel="next",` +
- ` ; rel="last"`,
- },
- },
- }
-
- response := newResponse(&r)
- if got, want := response.FirstPage, 1; got != want {
- t.Errorf("response.FirstPage: %v, want %v", got, want)
- }
- if got, want := response.PrevPage, 2; want != got {
- t.Errorf("response.PrevPage: %v, want %v", got, want)
- }
- if got, want := response.NextPage, 4; want != got {
- t.Errorf("response.NextPage: %v, want %v", got, want)
- }
- if got, want := response.LastPage, 5; want != got {
- t.Errorf("response.LastPage: %v, want %v", got, want)
- }
-}
-
-func TestResponse_populatePageValues_invalid(t *testing.T) {
- r := http.Response{
- Header: http.Header{
- "Link": {`,` +
- `; rel="first",` +
- `https://api.github.com/?page=2; rel="prev",` +
- ` ; rel="next",` +
- `; rel="last"`,
- },
- },
- }
-
- response := newResponse(&r)
- if got, want := response.FirstPage, 0; got != want {
- t.Errorf("response.FirstPage: %v, want %v", got, want)
- }
- if got, want := response.PrevPage, 0; got != want {
- t.Errorf("response.PrevPage: %v, want %v", got, want)
- }
- if got, want := response.NextPage, 0; got != want {
- t.Errorf("response.NextPage: %v, want %v", got, want)
- }
- if got, want := response.LastPage, 0; got != want {
- t.Errorf("response.LastPage: %v, want %v", got, want)
- }
-
- // more invalid URLs
- r = http.Response{
- Header: http.Header{
- "Link": {`; rel="first"`},
- },
- }
-
- response = newResponse(&r)
- if got, want := response.FirstPage, 0; got != want {
- t.Errorf("response.FirstPage: %v, want %v", got, want)
- }
-}
-
-func TestDo(t *testing.T) {
- setup()
- defer teardown()
-
- type foo struct {
- A string
- }
-
- mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
- if m := "GET"; m != r.Method {
- t.Errorf("Request method = %v, want %v", r.Method, m)
- }
- fmt.Fprint(w, `{"A":"a"}`)
- })
-
- req, _ := client.NewRequest("GET", "/", nil)
- body := new(foo)
- client.Do(req, body)
-
- want := &foo{"a"}
- if !reflect.DeepEqual(body, want) {
- t.Errorf("Response body = %v, want %v", body, want)
- }
-}
-
-func TestDo_httpError(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
- http.Error(w, "Bad Request", 400)
- })
-
- req, _ := client.NewRequest("GET", "/", nil)
- _, err := client.Do(req, nil)
-
- if err == nil {
- t.Error("Expected HTTP 400 error.")
- }
-}
-
-// Test handling of an error caused by the internal http client's Do()
-// function. A redirect loop is pretty unlikely to occur within the GitHub
-// API, but does allow us to exercise the right code path.
-func TestDo_redirectLoop(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
- http.Redirect(w, r, "/", http.StatusFound)
- })
-
- req, _ := client.NewRequest("GET", "/", nil)
- _, err := client.Do(req, nil)
-
- if err == nil {
- t.Error("Expected error to be returned.")
- }
- if err, ok := err.(*url.Error); !ok {
- t.Errorf("Expected a URL error; got %#v.", err)
- }
-}
-
-func TestDo_rateLimit(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
- w.Header().Add(headerRateLimit, "60")
- w.Header().Add(headerRateRemaining, "59")
- w.Header().Add(headerRateReset, "1372700873")
- })
-
- if got, want := client.Rate().Limit, 0; got != want {
- t.Errorf("Client rate limit = %v, want %v", got, want)
- }
- if got, want := client.Rate().Remaining, 0; got != want {
- t.Errorf("Client rate remaining = %v, got %v", got, want)
- }
- if !client.Rate().Reset.IsZero() {
- t.Errorf("Client rate reset not initialized to zero value")
- }
-
- req, _ := client.NewRequest("GET", "/", nil)
- _, err := client.Do(req, nil)
- if err != nil {
- t.Errorf("Do returned unexpected error: %v", err)
- }
- if got, want := client.Rate().Limit, 60; got != want {
- t.Errorf("Client rate limit = %v, want %v", got, want)
- }
- if got, want := client.Rate().Remaining, 59; got != want {
- t.Errorf("Client rate remaining = %v, want %v", got, want)
- }
- reset := time.Date(2013, 7, 1, 17, 47, 53, 0, time.UTC)
- if client.Rate().Reset.UTC() != reset {
- t.Errorf("Client rate reset = %v, want %v", client.Rate().Reset, reset)
- }
-}
-
-// ensure rate limit is still parsed, even for error responses
-func TestDo_rateLimit_errorResponse(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
- w.Header().Add(headerRateLimit, "60")
- w.Header().Add(headerRateRemaining, "59")
- w.Header().Add(headerRateReset, "1372700873")
- http.Error(w, "Bad Request", 400)
- })
-
- req, _ := client.NewRequest("GET", "/", nil)
- _, err := client.Do(req, nil)
-
- if err == nil {
- t.Error("Expected error to be returned.")
- }
- if _, ok := err.(*RateLimitError); ok {
- t.Errorf("Did not expect a *RateLimitError error; got %#v.", err)
- }
- if got, want := client.Rate().Limit, 60; got != want {
- t.Errorf("Client rate limit = %v, want %v", got, want)
- }
- if got, want := client.Rate().Remaining, 59; got != want {
- t.Errorf("Client rate remaining = %v, want %v", got, want)
- }
- reset := time.Date(2013, 7, 1, 17, 47, 53, 0, time.UTC)
- if client.Rate().Reset.UTC() != reset {
- t.Errorf("Client rate reset = %v, want %v", client.Rate().Reset, reset)
- }
-}
-
-// Ensure *RateLimitError is returned when API rate limit is exceeded.
-func TestDo_rateLimit_rateLimitError(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
- w.Header().Add(headerRateLimit, "60")
- w.Header().Add(headerRateRemaining, "0")
- w.Header().Add(headerRateReset, "1372700873")
- w.Header().Set("Content-Type", "application/json; charset=utf-8")
- w.WriteHeader(http.StatusForbidden)
- fmt.Fprintln(w, `{
- "message": "API rate limit exceeded for xxx.xxx.xxx.xxx. (But here's the good news: Authenticated requests get a higher rate limit. Check out the documentation for more details.)",
- "documentation_url": "https://developer.github.com/v3/#rate-limiting"
-}`)
- })
-
- req, _ := client.NewRequest("GET", "/", nil)
- _, err := client.Do(req, nil)
-
- if err == nil {
- t.Error("Expected error to be returned.")
- }
- rateLimitErr, ok := err.(*RateLimitError)
- if !ok {
- t.Fatalf("Expected a *RateLimitError error; got %#v.", err)
- }
- if got, want := rateLimitErr.Rate.Limit, 60; got != want {
- t.Errorf("rateLimitErr rate limit = %v, want %v", got, want)
- }
- if got, want := rateLimitErr.Rate.Remaining, 0; got != want {
- t.Errorf("rateLimitErr rate remaining = %v, want %v", got, want)
- }
- reset := time.Date(2013, 7, 1, 17, 47, 53, 0, time.UTC)
- if rateLimitErr.Rate.Reset.UTC() != reset {
- t.Errorf("rateLimitErr rate reset = %v, want %v", rateLimitErr.Rate.Reset.UTC(), reset)
- }
-}
-
-// Ensure a network call is not made when it's known that API rate limit is still exceeded.
-func TestDo_rateLimit_noNetworkCall(t *testing.T) {
- setup()
- defer teardown()
-
- reset := time.Now().UTC().Round(time.Second).Add(time.Minute) // Rate reset is a minute from now, with 1 second precision.
-
- mux.HandleFunc("/first", func(w http.ResponseWriter, r *http.Request) {
- w.Header().Add(headerRateLimit, "60")
- w.Header().Add(headerRateRemaining, "0")
- w.Header().Add(headerRateReset, fmt.Sprint(reset.Unix()))
- w.Header().Set("Content-Type", "application/json; charset=utf-8")
- w.WriteHeader(http.StatusForbidden)
- fmt.Fprintln(w, `{
- "message": "API rate limit exceeded for xxx.xxx.xxx.xxx. (But here's the good news: Authenticated requests get a higher rate limit. Check out the documentation for more details.)",
- "documentation_url": "https://developer.github.com/v3/#rate-limiting"
-}`)
- })
-
- madeNetworkCall := false
- mux.HandleFunc("/second", func(w http.ResponseWriter, r *http.Request) {
- madeNetworkCall = true
- })
-
- // First request is made, and it makes the client aware of rate reset time being in the future.
- req, _ := client.NewRequest("GET", "/first", nil)
- client.Do(req, nil)
-
- // Second request should not cause a network call to be made, since client can predict a rate limit error.
- req, _ = client.NewRequest("GET", "/second", nil)
- _, err := client.Do(req, nil)
-
- if madeNetworkCall {
- t.Fatal("Network call was made, even though rate limit is known to still be exceeded.")
- }
-
- if err == nil {
- t.Error("Expected error to be returned.")
- }
- rateLimitErr, ok := err.(*RateLimitError)
- if !ok {
- t.Fatalf("Expected a *RateLimitError error; got %#v.", err)
- }
- if got, want := rateLimitErr.Rate.Limit, 60; got != want {
- t.Errorf("rateLimitErr rate limit = %v, want %v", got, want)
- }
- if got, want := rateLimitErr.Rate.Remaining, 0; got != want {
- t.Errorf("rateLimitErr rate remaining = %v, want %v", got, want)
- }
- if rateLimitErr.Rate.Reset.UTC() != reset {
- t.Errorf("rateLimitErr rate reset = %v, want %v", rateLimitErr.Rate.Reset.UTC(), reset)
- }
-}
-
-func TestDo_noContent(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
- w.WriteHeader(http.StatusNoContent)
- })
-
- var body json.RawMessage
-
- req, _ := client.NewRequest("GET", "/", nil)
- _, err := client.Do(req, &body)
- if err != nil {
- t.Fatalf("Do returned unexpected error: %v", err)
- }
-}
-
-func TestSanitizeURL(t *testing.T) {
- tests := []struct {
- in, want string
- }{
- {"/?a=b", "/?a=b"},
- {"/?a=b&client_secret=secret", "/?a=b&client_secret=REDACTED"},
- {"/?a=b&client_id=id&client_secret=secret", "/?a=b&client_id=id&client_secret=REDACTED"},
- }
-
- for _, tt := range tests {
- inURL, _ := url.Parse(tt.in)
- want, _ := url.Parse(tt.want)
-
- if got := sanitizeURL(inURL); !reflect.DeepEqual(got, want) {
- t.Errorf("sanitizeURL(%v) returned %v, want %v", tt.in, got, want)
- }
- }
-}
-
-func TestCheckResponse(t *testing.T) {
- res := &http.Response{
- Request: &http.Request{},
- StatusCode: http.StatusBadRequest,
- Body: ioutil.NopCloser(strings.NewReader(`{"message":"m",
- "errors": [{"resource": "r", "field": "f", "code": "c"}],
- "block": {"reason": "dmca", "created_at": "2016-03-17T15:39:46Z"}}`)),
- }
- err := CheckResponse(res).(*ErrorResponse)
-
- if err == nil {
- t.Errorf("Expected error response.")
- }
-
- want := &ErrorResponse{
- Response: res,
- Message: "m",
- Errors: []Error{{Resource: "r", Field: "f", Code: "c"}},
- Block: &struct {
- Reason string `json:"reason,omitempty"`
- CreatedAt *Timestamp `json:"created_at,omitempty"`
- }{
- Reason: "dmca",
- CreatedAt: &Timestamp{time.Date(2016, 3, 17, 15, 39, 46, 0, time.UTC)},
- },
- }
- if !reflect.DeepEqual(err, want) {
- t.Errorf("Error = %#v, want %#v", err, want)
- }
-}
-
-// ensure that we properly handle API errors that do not contain a response body
-func TestCheckResponse_noBody(t *testing.T) {
- res := &http.Response{
- Request: &http.Request{},
- StatusCode: http.StatusBadRequest,
- Body: ioutil.NopCloser(strings.NewReader("")),
- }
- err := CheckResponse(res).(*ErrorResponse)
-
- if err == nil {
- t.Errorf("Expected error response.")
- }
-
- want := &ErrorResponse{
- Response: res,
- }
- if !reflect.DeepEqual(err, want) {
- t.Errorf("Error = %#v, want %#v", err, want)
- }
-}
-
-func TestParseBooleanResponse_true(t *testing.T) {
- result, err := parseBoolResponse(nil)
- if err != nil {
- t.Errorf("parseBoolResponse returned error: %+v", err)
- }
-
- if want := true; result != want {
- t.Errorf("parseBoolResponse returned %+v, want: %+v", result, want)
- }
-}
-
-func TestParseBooleanResponse_false(t *testing.T) {
- v := &ErrorResponse{Response: &http.Response{StatusCode: http.StatusNotFound}}
- result, err := parseBoolResponse(v)
- if err != nil {
- t.Errorf("parseBoolResponse returned error: %+v", err)
- }
-
- if want := false; result != want {
- t.Errorf("parseBoolResponse returned %+v, want: %+v", result, want)
- }
-}
-
-func TestParseBooleanResponse_error(t *testing.T) {
- v := &ErrorResponse{Response: &http.Response{StatusCode: http.StatusBadRequest}}
- result, err := parseBoolResponse(v)
-
- if err == nil {
- t.Errorf("Expected error to be returned.")
- }
-
- if want := false; result != want {
- t.Errorf("parseBoolResponse returned %+v, want: %+v", result, want)
- }
-}
-
-func TestErrorResponse_Error(t *testing.T) {
- res := &http.Response{Request: &http.Request{}}
- err := ErrorResponse{Message: "m", Response: res}
- if err.Error() == "" {
- t.Errorf("Expected non-empty ErrorResponse.Error()")
- }
-}
-
-func TestError_Error(t *testing.T) {
- err := Error{}
- if err.Error() == "" {
- t.Errorf("Expected non-empty Error.Error()")
- }
-}
-
-func TestRateLimit(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/rate_limit", func(w http.ResponseWriter, r *http.Request) {
- if m := "GET"; m != r.Method {
- t.Errorf("Request method = %v, want %v", r.Method, m)
- }
- fmt.Fprint(w, `{"resources":{
- "core": {"limit":2,"remaining":1,"reset":1372700873},
- "search": {"limit":3,"remaining":2,"reset":1372700874}
- }}`)
- })
-
- rate, _, err := client.RateLimit()
- if err != nil {
- t.Errorf("Rate limit returned error: %v", err)
- }
-
- want := &Rate{
- Limit: 2,
- Remaining: 1,
- Reset: Timestamp{time.Date(2013, 7, 1, 17, 47, 53, 0, time.UTC).Local()},
- }
- if !reflect.DeepEqual(rate, want) {
- t.Errorf("RateLimit returned %+v, want %+v", rate, want)
- }
-}
-
-func TestRateLimits(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/rate_limit", func(w http.ResponseWriter, r *http.Request) {
- if m := "GET"; m != r.Method {
- t.Errorf("Request method = %v, want %v", r.Method, m)
- }
- fmt.Fprint(w, `{"resources":{
- "core": {"limit":2,"remaining":1,"reset":1372700873},
- "search": {"limit":3,"remaining":2,"reset":1372700874}
- }}`)
- })
-
- rate, _, err := client.RateLimits()
- if err != nil {
- t.Errorf("RateLimits returned error: %v", err)
- }
-
- want := &RateLimits{
- Core: &Rate{
- Limit: 2,
- Remaining: 1,
- Reset: Timestamp{time.Date(2013, 7, 1, 17, 47, 53, 0, time.UTC).Local()},
- },
- Search: &Rate{
- Limit: 3,
- Remaining: 2,
- Reset: Timestamp{time.Date(2013, 7, 1, 17, 47, 54, 0, time.UTC).Local()},
- },
- }
- if !reflect.DeepEqual(rate, want) {
- t.Errorf("RateLimits returned %+v, want %+v", rate, want)
- }
-
- if got, want := client.rateLimits[coreCategory], *want.Core; got != want {
- t.Errorf("client.rateLimits[coreCategory] is %+v, want %+v", got, want)
- }
- if got, want := client.rateLimits[searchCategory], *want.Search; got != want {
- t.Errorf("client.rateLimits[searchCategory] is %+v, want %+v", got, want)
- }
-}
-
-func TestUnauthenticatedRateLimitedTransport(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
- var v, want string
- q := r.URL.Query()
- if v, want = q.Get("client_id"), "id"; v != want {
- t.Errorf("OAuth Client ID = %v, want %v", v, want)
- }
- if v, want = q.Get("client_secret"), "secret"; v != want {
- t.Errorf("OAuth Client Secret = %v, want %v", v, want)
- }
- })
-
- tp := &UnauthenticatedRateLimitedTransport{
- ClientID: "id",
- ClientSecret: "secret",
- }
- unauthedClient := NewClient(tp.Client())
- unauthedClient.BaseURL = client.BaseURL
- req, _ := unauthedClient.NewRequest("GET", "/", nil)
- unauthedClient.Do(req, nil)
-}
-
-func TestUnauthenticatedRateLimitedTransport_missingFields(t *testing.T) {
- // missing ClientID
- tp := &UnauthenticatedRateLimitedTransport{
- ClientSecret: "secret",
- }
- _, err := tp.RoundTrip(nil)
- if err == nil {
- t.Errorf("Expected error to be returned")
- }
-
- // missing ClientSecret
- tp = &UnauthenticatedRateLimitedTransport{
- ClientID: "id",
- }
- _, err = tp.RoundTrip(nil)
- if err == nil {
- t.Errorf("Expected error to be returned")
- }
-}
-
-func TestUnauthenticatedRateLimitedTransport_transport(t *testing.T) {
- // default transport
- tp := &UnauthenticatedRateLimitedTransport{
- ClientID: "id",
- ClientSecret: "secret",
- }
- if tp.transport() != http.DefaultTransport {
- t.Errorf("Expected http.DefaultTransport to be used.")
- }
-
- // custom transport
- tp = &UnauthenticatedRateLimitedTransport{
- ClientID: "id",
- ClientSecret: "secret",
- Transport: &http.Transport{},
- }
- if tp.transport() == http.DefaultTransport {
- t.Errorf("Expected custom transport to be used.")
- }
-}
-
-func TestBasicAuthTransport(t *testing.T) {
- setup()
- defer teardown()
-
- username, password, otp := "u", "p", "123456"
-
- mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
- u, p, ok := r.BasicAuth()
- if !ok {
- t.Errorf("request does not contain basic auth credentials")
- }
- if u != username {
- t.Errorf("request contained basic auth username %q, want %q", u, username)
- }
- if p != password {
- t.Errorf("request contained basic auth password %q, want %q", p, password)
- }
- if got, want := r.Header.Get(headerOTP), otp; got != want {
- t.Errorf("request contained OTP %q, want %q", got, want)
- }
- })
-
- tp := &BasicAuthTransport{
- Username: username,
- Password: password,
- OTP: otp,
- }
- basicAuthClient := NewClient(tp.Client())
- basicAuthClient.BaseURL = client.BaseURL
- req, _ := basicAuthClient.NewRequest("GET", "/", nil)
- basicAuthClient.Do(req, nil)
-}
-
-func TestBasicAuthTransport_transport(t *testing.T) {
- // default transport
- tp := &BasicAuthTransport{}
- if tp.transport() != http.DefaultTransport {
- t.Errorf("Expected http.DefaultTransport to be used.")
- }
-
- // custom transport
- tp = &BasicAuthTransport{
- Transport: &http.Transport{},
- }
- if tp.transport() == http.DefaultTransport {
- t.Errorf("Expected custom transport to be used.")
- }
-}
diff --git a/vendor/src/github.com/google/go-github/github/gitignore.go b/vendor/src/github.com/google/go-github/github/gitignore.go
deleted file mode 100644
index faaceb5..0000000
--- a/vendor/src/github.com/google/go-github/github/gitignore.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import "fmt"
-
-// GitignoresService provides access to the gitignore related functions in the
-// GitHub API.
-//
-// GitHub API docs: http://developer.github.com/v3/gitignore/
-type GitignoresService service
-
-// Gitignore represents a .gitignore file as returned by the GitHub API.
-type Gitignore struct {
- Name *string `json:"name,omitempty"`
- Source *string `json:"source,omitempty"`
-}
-
-func (g Gitignore) String() string {
- return Stringify(g)
-}
-
-// List all available Gitignore templates.
-//
-// http://developer.github.com/v3/gitignore/#listing-available-templates
-func (s GitignoresService) List() ([]string, *Response, error) {
- req, err := s.client.NewRequest("GET", "gitignore/templates", nil)
- if err != nil {
- return nil, nil, err
- }
-
- availableTemplates := new([]string)
- resp, err := s.client.Do(req, availableTemplates)
- if err != nil {
- return nil, resp, err
- }
-
- return *availableTemplates, resp, err
-}
-
-// Get a Gitignore by name.
-//
-// http://developer.github.com/v3/gitignore/#get-a-single-template
-func (s GitignoresService) Get(name string) (*Gitignore, *Response, error) {
- u := fmt.Sprintf("gitignore/templates/%v", name)
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- gitignore := new(Gitignore)
- resp, err := s.client.Do(req, gitignore)
- if err != nil {
- return nil, resp, err
- }
-
- return gitignore, resp, err
-}
diff --git a/vendor/src/github.com/google/go-github/github/gitignore_test.go b/vendor/src/github.com/google/go-github/github/gitignore_test.go
deleted file mode 100644
index 6d49d00..0000000
--- a/vendor/src/github.com/google/go-github/github/gitignore_test.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "fmt"
- "net/http"
- "reflect"
- "testing"
-)
-
-func TestGitignoresService_List(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/gitignore/templates", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- fmt.Fprint(w, `["C", "Go"]`)
- })
-
- available, _, err := client.Gitignores.List()
- if err != nil {
- t.Errorf("Gitignores.List returned error: %v", err)
- }
-
- want := []string{"C", "Go"}
- if !reflect.DeepEqual(available, want) {
- t.Errorf("Gitignores.List returned %+v, want %+v", available, want)
- }
-}
-
-func TestGitignoresService_Get(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/gitignore/templates/name", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- fmt.Fprint(w, `{"name":"Name","source":"template source"}`)
- })
-
- gitignore, _, err := client.Gitignores.Get("name")
- if err != nil {
- t.Errorf("Gitignores.List returned error: %v", err)
- }
-
- want := &Gitignore{Name: String("Name"), Source: String("template source")}
- if !reflect.DeepEqual(gitignore, want) {
- t.Errorf("Gitignores.Get returned %+v, want %+v", gitignore, want)
- }
-}
-
-func TestGitignoresService_Get_invalidTemplate(t *testing.T) {
- _, _, err := client.Gitignores.Get("%")
- testURLParseError(t, err)
-}
diff --git a/vendor/src/github.com/google/go-github/github/issues.go b/vendor/src/github.com/google/go-github/github/issues.go
deleted file mode 100644
index 02c82cd..0000000
--- a/vendor/src/github.com/google/go-github/github/issues.go
+++ /dev/null
@@ -1,299 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "fmt"
- "time"
-)
-
-// IssuesService handles communication with the issue related
-// methods of the GitHub API.
-//
-// GitHub API docs: http://developer.github.com/v3/issues/
-type IssuesService service
-
-// Issue represents a GitHub issue on a repository.
-type Issue struct {
- ID *int `json:"id,omitempty"`
- Number *int `json:"number,omitempty"`
- State *string `json:"state,omitempty"`
- Title *string `json:"title,omitempty"`
- Body *string `json:"body,omitempty"`
- User *User `json:"user,omitempty"`
- Labels []Label `json:"labels,omitempty"`
- Assignee *User `json:"assignee,omitempty"`
- Comments *int `json:"comments,omitempty"`
- ClosedAt *time.Time `json:"closed_at,omitempty"`
- CreatedAt *time.Time `json:"created_at,omitempty"`
- UpdatedAt *time.Time `json:"updated_at,omitempty"`
- URL *string `json:"url,omitempty"`
- HTMLURL *string `json:"html_url,omitempty"`
- Milestone *Milestone `json:"milestone,omitempty"`
- PullRequestLinks *PullRequestLinks `json:"pull_request,omitempty"`
- Repository *Repository `json:"repository,omitempty"`
- Reactions *Reactions `json:"reactions,omitempty"`
- Assignees []*User `json:"assignees,omitempty"`
-
- // TextMatches is only populated from search results that request text matches
- // See: search.go and https://developer.github.com/v3/search/#text-match-metadata
- TextMatches []TextMatch `json:"text_matches,omitempty"`
-}
-
-func (i Issue) String() string {
- return Stringify(i)
-}
-
-// IssueRequest represents a request to create/edit an issue.
-// It is separate from Issue above because otherwise Labels
-// and Assignee fail to serialize to the correct JSON.
-type IssueRequest struct {
- Title *string `json:"title,omitempty"`
- Body *string `json:"body,omitempty"`
- Labels *[]string `json:"labels,omitempty"`
- Assignee *string `json:"assignee,omitempty"`
- State *string `json:"state,omitempty"`
- Milestone *int `json:"milestone,omitempty"`
- Assignees *[]string `json:"assignees,omitempty"`
-}
-
-// IssueListOptions specifies the optional parameters to the IssuesService.List
-// and IssuesService.ListByOrg methods.
-type IssueListOptions struct {
- // Filter specifies which issues to list. Possible values are: assigned,
- // created, mentioned, subscribed, all. Default is "assigned".
- Filter string `url:"filter,omitempty"`
-
- // State filters issues based on their state. Possible values are: open,
- // closed, all. Default is "open".
- State string `url:"state,omitempty"`
-
- // Labels filters issues based on their label.
- Labels []string `url:"labels,comma,omitempty"`
-
- // Sort specifies how to sort issues. Possible values are: created, updated,
- // and comments. Default value is "created".
- Sort string `url:"sort,omitempty"`
-
- // Direction in which to sort issues. Possible values are: asc, desc.
- // Default is "desc".
- Direction string `url:"direction,omitempty"`
-
- // Since filters issues by time.
- Since time.Time `url:"since,omitempty"`
-
- ListOptions
-}
-
-// PullRequestLinks object is added to the Issue object when it's an issue included
-// in the IssueCommentEvent webhook payload, if the webhooks is fired by a comment on a PR
-type PullRequestLinks struct {
- URL *string `json:"url,omitempty"`
- HTMLURL *string `json:"html_url,omitempty"`
- DiffURL *string `json:"diff_url,omitempty"`
- PatchURL *string `json:"patch_url,omitempty"`
-}
-
-// List the issues for the authenticated user. If all is true, list issues
-// across all the user's visible repositories including owned, member, and
-// organization repositories; if false, list only owned and member
-// repositories.
-//
-// GitHub API docs: http://developer.github.com/v3/issues/#list-issues
-func (s *IssuesService) List(all bool, opt *IssueListOptions) ([]*Issue, *Response, error) {
- var u string
- if all {
- u = "issues"
- } else {
- u = "user/issues"
- }
- return s.listIssues(u, opt)
-}
-
-// ListByOrg fetches the issues in the specified organization for the
-// authenticated user.
-//
-// GitHub API docs: http://developer.github.com/v3/issues/#list-issues
-func (s *IssuesService) ListByOrg(org string, opt *IssueListOptions) ([]*Issue, *Response, error) {
- u := fmt.Sprintf("orgs/%v/issues", org)
- return s.listIssues(u, opt)
-}
-
-func (s *IssuesService) listIssues(u string, opt *IssueListOptions) ([]*Issue, *Response, error) {
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeReactionsPreview)
-
- issues := new([]*Issue)
- resp, err := s.client.Do(req, issues)
- if err != nil {
- return nil, resp, err
- }
-
- return *issues, resp, err
-}
-
-// IssueListByRepoOptions specifies the optional parameters to the
-// IssuesService.ListByRepo method.
-type IssueListByRepoOptions struct {
- // Milestone limits issues for the specified milestone. Possible values are
- // a milestone number, "none" for issues with no milestone, "*" for issues
- // with any milestone.
- Milestone string `url:"milestone,omitempty"`
-
- // State filters issues based on their state. Possible values are: open,
- // closed, all. Default is "open".
- State string `url:"state,omitempty"`
-
- // Assignee filters issues based on their assignee. Possible values are a
- // user name, "none" for issues that are not assigned, "*" for issues with
- // any assigned user.
- Assignee string `url:"assignee,omitempty"`
-
- // Creator filters issues based on their creator.
- Creator string `url:"creator,omitempty"`
-
- // Mentioned filters issues to those mentioned a specific user.
- Mentioned string `url:"mentioned,omitempty"`
-
- // Labels filters issues based on their label.
- Labels []string `url:"labels,omitempty,comma"`
-
- // Sort specifies how to sort issues. Possible values are: created, updated,
- // and comments. Default value is "created".
- Sort string `url:"sort,omitempty"`
-
- // Direction in which to sort issues. Possible values are: asc, desc.
- // Default is "desc".
- Direction string `url:"direction,omitempty"`
-
- // Since filters issues by time.
- Since time.Time `url:"since,omitempty"`
-
- ListOptions
-}
-
-// ListByRepo lists the issues for the specified repository.
-//
-// GitHub API docs: http://developer.github.com/v3/issues/#list-issues-for-a-repository
-func (s *IssuesService) ListByRepo(owner string, repo string, opt *IssueListByRepoOptions) ([]*Issue, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/issues", owner, repo)
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeReactionsPreview)
-
- issues := new([]*Issue)
- resp, err := s.client.Do(req, issues)
- if err != nil {
- return nil, resp, err
- }
-
- return *issues, resp, err
-}
-
-// Get a single issue.
-//
-// GitHub API docs: http://developer.github.com/v3/issues/#get-a-single-issue
-func (s *IssuesService) Get(owner string, repo string, number int) (*Issue, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/issues/%d", owner, repo, number)
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeReactionsPreview)
-
- issue := new(Issue)
- resp, err := s.client.Do(req, issue)
- if err != nil {
- return nil, resp, err
- }
-
- return issue, resp, err
-}
-
-// Create a new issue on the specified repository.
-//
-// GitHub API docs: http://developer.github.com/v3/issues/#create-an-issue
-func (s *IssuesService) Create(owner string, repo string, issue *IssueRequest) (*Issue, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/issues", owner, repo)
- req, err := s.client.NewRequest("POST", u, issue)
- if err != nil {
- return nil, nil, err
- }
-
- i := new(Issue)
- resp, err := s.client.Do(req, i)
- if err != nil {
- return nil, resp, err
- }
-
- return i, resp, err
-}
-
-// Edit an issue.
-//
-// GitHub API docs: http://developer.github.com/v3/issues/#edit-an-issue
-func (s *IssuesService) Edit(owner string, repo string, number int, issue *IssueRequest) (*Issue, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/issues/%d", owner, repo, number)
- req, err := s.client.NewRequest("PATCH", u, issue)
- if err != nil {
- return nil, nil, err
- }
-
- i := new(Issue)
- resp, err := s.client.Do(req, i)
- if err != nil {
- return nil, resp, err
- }
-
- return i, resp, err
-}
-
-// Lock an issue's conversation.
-//
-// GitHub API docs: https://developer.github.com/v3/issues/#lock-an-issue
-func (s *IssuesService) Lock(owner string, repo string, number int) (*Response, error) {
- u := fmt.Sprintf("repos/%v/%v/issues/%d/lock", owner, repo, number)
- req, err := s.client.NewRequest("PUT", u, nil)
- if err != nil {
- return nil, err
- }
-
- return s.client.Do(req, nil)
-}
-
-// Unlock an issue's conversation.
-//
-// GitHub API docs: https://developer.github.com/v3/issues/#unlock-an-issue
-func (s *IssuesService) Unlock(owner string, repo string, number int) (*Response, error) {
- u := fmt.Sprintf("repos/%v/%v/issues/%d/lock", owner, repo, number)
- req, err := s.client.NewRequest("DELETE", u, nil)
- if err != nil {
- return nil, err
- }
-
- return s.client.Do(req, nil)
-}
diff --git a/vendor/src/github.com/google/go-github/github/issues_assignees.go b/vendor/src/github.com/google/go-github/github/issues_assignees.go
deleted file mode 100644
index 2503be1..0000000
--- a/vendor/src/github.com/google/go-github/github/issues_assignees.go
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import "fmt"
-
-// ListAssignees fetches all available assignees (owners and collaborators) to
-// which issues may be assigned.
-//
-// GitHub API docs: http://developer.github.com/v3/issues/assignees/#list-assignees
-func (s *IssuesService) ListAssignees(owner, repo string, opt *ListOptions) ([]*User, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/assignees", owner, repo)
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
- assignees := new([]*User)
- resp, err := s.client.Do(req, assignees)
- if err != nil {
- return nil, resp, err
- }
-
- return *assignees, resp, err
-}
-
-// IsAssignee checks if a user is an assignee for the specified repository.
-//
-// GitHub API docs: http://developer.github.com/v3/issues/assignees/#check-assignee
-func (s *IssuesService) IsAssignee(owner, repo, user string) (bool, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/assignees/%v", owner, repo, user)
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return false, nil, err
- }
- resp, err := s.client.Do(req, nil)
- assignee, err := parseBoolResponse(err)
- return assignee, resp, err
-}
-
-// AddAssignees adds the provided GitHub users as assignees to the issue.
-//
-// GitHub API docs: https://developer.github.com/v3/issues/assignees/#add-assignees-to-an-issue
-func (s *IssuesService) AddAssignees(owner, repo string, number int, assignees []string) (*Issue, *Response, error) {
- users := &struct {
- Assignees []string `json:"assignees,omitempty"`
- }{Assignees: assignees}
- u := fmt.Sprintf("repos/%v/%v/issues/%v/assignees", owner, repo, number)
- req, err := s.client.NewRequest("POST", u, users)
- if err != nil {
- return nil, nil, err
- }
-
- issue := &Issue{}
- resp, err := s.client.Do(req, issue)
- return issue, resp, err
-}
-
-// RemoveAssignees removes the provided GitHub users as assignees from the issue.
-//
-// GitHub API docs: https://developer.github.com/v3/issues/assignees/#remove-assignees-from-an-issue
-func (s *IssuesService) RemoveAssignees(owner, repo string, number int, assignees []string) (*Issue, *Response, error) {
- users := &struct {
- Assignees []string `json:"assignees,omitempty"`
- }{Assignees: assignees}
- u := fmt.Sprintf("repos/%v/%v/issues/%v/assignees", owner, repo, number)
- req, err := s.client.NewRequest("DELETE", u, users)
- if err != nil {
- return nil, nil, err
- }
-
- issue := &Issue{}
- resp, err := s.client.Do(req, issue)
- return issue, resp, err
-}
diff --git a/vendor/src/github.com/google/go-github/github/issues_assignees_test.go b/vendor/src/github.com/google/go-github/github/issues_assignees_test.go
deleted file mode 100644
index 73d521e..0000000
--- a/vendor/src/github.com/google/go-github/github/issues_assignees_test.go
+++ /dev/null
@@ -1,157 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "encoding/json"
- "fmt"
- "net/http"
- "reflect"
- "testing"
-)
-
-func TestIssuesService_ListAssignees(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/assignees", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testFormValues(t, r, values{"page": "2"})
- fmt.Fprint(w, `[{"id":1}]`)
- })
-
- opt := &ListOptions{Page: 2}
- assignees, _, err := client.Issues.ListAssignees("o", "r", opt)
- if err != nil {
- t.Errorf("Issues.ListAssignees returned error: %v", err)
- }
-
- want := []*User{{ID: Int(1)}}
- if !reflect.DeepEqual(assignees, want) {
- t.Errorf("Issues.ListAssignees returned %+v, want %+v", assignees, want)
- }
-}
-
-func TestIssuesService_ListAssignees_invalidOwner(t *testing.T) {
- _, _, err := client.Issues.ListAssignees("%", "r", nil)
- testURLParseError(t, err)
-}
-
-func TestIssuesService_IsAssignee_true(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/assignees/u", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- })
-
- assignee, _, err := client.Issues.IsAssignee("o", "r", "u")
- if err != nil {
- t.Errorf("Issues.IsAssignee returned error: %v", err)
- }
- if want := true; assignee != want {
- t.Errorf("Issues.IsAssignee returned %+v, want %+v", assignee, want)
- }
-}
-
-func TestIssuesService_IsAssignee_false(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/assignees/u", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- w.WriteHeader(http.StatusNotFound)
- })
-
- assignee, _, err := client.Issues.IsAssignee("o", "r", "u")
- if err != nil {
- t.Errorf("Issues.IsAssignee returned error: %v", err)
- }
- if want := false; assignee != want {
- t.Errorf("Issues.IsAssignee returned %+v, want %+v", assignee, want)
- }
-}
-
-func TestIssuesService_IsAssignee_error(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/assignees/u", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- http.Error(w, "BadRequest", http.StatusBadRequest)
- })
-
- assignee, _, err := client.Issues.IsAssignee("o", "r", "u")
- if err == nil {
- t.Errorf("Expected HTTP 400 response")
- }
- if want := false; assignee != want {
- t.Errorf("Issues.IsAssignee returned %+v, want %+v", assignee, want)
- }
-}
-
-func TestIssuesService_IsAssignee_invalidOwner(t *testing.T) {
- _, _, err := client.Issues.IsAssignee("%", "r", "u")
- testURLParseError(t, err)
-}
-
-func TestIssuesService_AddAssignees(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/issues/1/assignees", func(w http.ResponseWriter, r *http.Request) {
- var assignees struct {
- Assignees []string `json:"assignees,omitempty"`
- }
- json.NewDecoder(r.Body).Decode(&assignees)
-
- testMethod(t, r, "POST")
- want := []string{"user1", "user2"}
- if !reflect.DeepEqual(assignees.Assignees, want) {
- t.Errorf("assignees = %+v, want %+v", assignees, want)
- }
- fmt.Fprint(w, `{"number":1,"assignees":[{"login":"user1"},{"login":"user2"}]}`)
- })
-
- got, _, err := client.Issues.AddAssignees("o", "r", 1, []string{"user1", "user2"})
- if err != nil {
- t.Errorf("Issues.AddAssignees returned error: %v", err)
- }
-
- want := &Issue{Number: Int(1), Assignees: []*User{{Login: String("user1")}, {Login: String("user2")}}}
- if !reflect.DeepEqual(got, want) {
- t.Errorf("Issues.AddAssignees = %+v, want %+v", got, want)
- }
-}
-
-func TestIssuesService_RemoveAssignees(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/issues/1/assignees", func(w http.ResponseWriter, r *http.Request) {
- var assignees struct {
- Assignees []string `json:"assignees,omitempty"`
- }
- json.NewDecoder(r.Body).Decode(&assignees)
-
- testMethod(t, r, "DELETE")
- want := []string{"user1", "user2"}
- if !reflect.DeepEqual(assignees.Assignees, want) {
- t.Errorf("assignees = %+v, want %+v", assignees, want)
- }
- fmt.Fprint(w, `{"number":1,"assignees":[]}`)
- })
-
- got, _, err := client.Issues.RemoveAssignees("o", "r", 1, []string{"user1", "user2"})
- if err != nil {
- t.Errorf("Issues.RemoveAssignees returned error: %v", err)
- }
-
- want := &Issue{Number: Int(1), Assignees: []*User{}}
- if !reflect.DeepEqual(got, want) {
- t.Errorf("Issues.RemoveAssignees = %+v, want %+v", got, want)
- }
-}
diff --git a/vendor/src/github.com/google/go-github/github/issues_comments.go b/vendor/src/github.com/google/go-github/github/issues_comments.go
deleted file mode 100644
index b24c5ae..0000000
--- a/vendor/src/github.com/google/go-github/github/issues_comments.go
+++ /dev/null
@@ -1,147 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "fmt"
- "time"
-)
-
-// IssueComment represents a comment left on an issue.
-type IssueComment struct {
- ID *int `json:"id,omitempty"`
- Body *string `json:"body,omitempty"`
- User *User `json:"user,omitempty"`
- Reactions *Reactions `json:"reactions,omitempty"`
- CreatedAt *time.Time `json:"created_at,omitempty"`
- UpdatedAt *time.Time `json:"updated_at,omitempty"`
- URL *string `json:"url,omitempty"`
- HTMLURL *string `json:"html_url,omitempty"`
- IssueURL *string `json:"issue_url,omitempty"`
-}
-
-func (i IssueComment) String() string {
- return Stringify(i)
-}
-
-// IssueListCommentsOptions specifies the optional parameters to the
-// IssuesService.ListComments method.
-type IssueListCommentsOptions struct {
- // Sort specifies how to sort comments. Possible values are: created, updated.
- Sort string `url:"sort,omitempty"`
-
- // Direction in which to sort comments. Possible values are: asc, desc.
- Direction string `url:"direction,omitempty"`
-
- // Since filters comments by time.
- Since time.Time `url:"since,omitempty"`
-
- ListOptions
-}
-
-// ListComments lists all comments on the specified issue. Specifying an issue
-// number of 0 will return all comments on all issues for the repository.
-//
-// GitHub API docs: http://developer.github.com/v3/issues/comments/#list-comments-on-an-issue
-func (s *IssuesService) ListComments(owner string, repo string, number int, opt *IssueListCommentsOptions) ([]*IssueComment, *Response, error) {
- var u string
- if number == 0 {
- u = fmt.Sprintf("repos/%v/%v/issues/comments", owner, repo)
- } else {
- u = fmt.Sprintf("repos/%v/%v/issues/%d/comments", owner, repo, number)
- }
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeReactionsPreview)
-
- comments := new([]*IssueComment)
- resp, err := s.client.Do(req, comments)
- if err != nil {
- return nil, resp, err
- }
-
- return *comments, resp, err
-}
-
-// GetComment fetches the specified issue comment.
-//
-// GitHub API docs: http://developer.github.com/v3/issues/comments/#get-a-single-comment
-func (s *IssuesService) GetComment(owner string, repo string, id int) (*IssueComment, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/issues/comments/%d", owner, repo, id)
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeReactionsPreview)
-
- comment := new(IssueComment)
- resp, err := s.client.Do(req, comment)
- if err != nil {
- return nil, resp, err
- }
-
- return comment, resp, err
-}
-
-// CreateComment creates a new comment on the specified issue.
-//
-// GitHub API docs: http://developer.github.com/v3/issues/comments/#create-a-comment
-func (s *IssuesService) CreateComment(owner string, repo string, number int, comment *IssueComment) (*IssueComment, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/issues/%d/comments", owner, repo, number)
- req, err := s.client.NewRequest("POST", u, comment)
- if err != nil {
- return nil, nil, err
- }
- c := new(IssueComment)
- resp, err := s.client.Do(req, c)
- if err != nil {
- return nil, resp, err
- }
-
- return c, resp, err
-}
-
-// EditComment updates an issue comment.
-//
-// GitHub API docs: http://developer.github.com/v3/issues/comments/#edit-a-comment
-func (s *IssuesService) EditComment(owner string, repo string, id int, comment *IssueComment) (*IssueComment, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/issues/comments/%d", owner, repo, id)
- req, err := s.client.NewRequest("PATCH", u, comment)
- if err != nil {
- return nil, nil, err
- }
- c := new(IssueComment)
- resp, err := s.client.Do(req, c)
- if err != nil {
- return nil, resp, err
- }
-
- return c, resp, err
-}
-
-// DeleteComment deletes an issue comment.
-//
-// GitHub API docs: http://developer.github.com/v3/issues/comments/#delete-a-comment
-func (s *IssuesService) DeleteComment(owner string, repo string, id int) (*Response, error) {
- u := fmt.Sprintf("repos/%v/%v/issues/comments/%d", owner, repo, id)
- req, err := s.client.NewRequest("DELETE", u, nil)
- if err != nil {
- return nil, err
- }
- return s.client.Do(req, nil)
-}
diff --git a/vendor/src/github.com/google/go-github/github/issues_comments_test.go b/vendor/src/github.com/google/go-github/github/issues_comments_test.go
deleted file mode 100644
index b3a0ec1..0000000
--- a/vendor/src/github.com/google/go-github/github/issues_comments_test.go
+++ /dev/null
@@ -1,187 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "encoding/json"
- "fmt"
- "net/http"
- "reflect"
- "testing"
- "time"
-)
-
-func TestIssuesService_ListComments_allIssues(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/issues/comments", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testHeader(t, r, "Accept", mediaTypeReactionsPreview)
- testFormValues(t, r, values{
- "sort": "updated",
- "direction": "desc",
- "since": "2002-02-10T15:30:00Z",
- "page": "2",
- })
- fmt.Fprint(w, `[{"id":1}]`)
- })
-
- opt := &IssueListCommentsOptions{
- Sort: "updated",
- Direction: "desc",
- Since: time.Date(2002, time.February, 10, 15, 30, 0, 0, time.UTC),
- ListOptions: ListOptions{Page: 2},
- }
- comments, _, err := client.Issues.ListComments("o", "r", 0, opt)
- if err != nil {
- t.Errorf("Issues.ListComments returned error: %v", err)
- }
-
- want := []*IssueComment{{ID: Int(1)}}
- if !reflect.DeepEqual(comments, want) {
- t.Errorf("Issues.ListComments returned %+v, want %+v", comments, want)
- }
-}
-
-func TestIssuesService_ListComments_specificIssue(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/issues/1/comments", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testHeader(t, r, "Accept", mediaTypeReactionsPreview)
- fmt.Fprint(w, `[{"id":1}]`)
- })
-
- comments, _, err := client.Issues.ListComments("o", "r", 1, nil)
- if err != nil {
- t.Errorf("Issues.ListComments returned error: %v", err)
- }
-
- want := []*IssueComment{{ID: Int(1)}}
- if !reflect.DeepEqual(comments, want) {
- t.Errorf("Issues.ListComments returned %+v, want %+v", comments, want)
- }
-}
-
-func TestIssuesService_ListComments_invalidOwner(t *testing.T) {
- _, _, err := client.Issues.ListComments("%", "r", 1, nil)
- testURLParseError(t, err)
-}
-
-func TestIssuesService_GetComment(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/issues/comments/1", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testHeader(t, r, "Accept", mediaTypeReactionsPreview)
- fmt.Fprint(w, `{"id":1}`)
- })
-
- comment, _, err := client.Issues.GetComment("o", "r", 1)
- if err != nil {
- t.Errorf("Issues.GetComment returned error: %v", err)
- }
-
- want := &IssueComment{ID: Int(1)}
- if !reflect.DeepEqual(comment, want) {
- t.Errorf("Issues.GetComment returned %+v, want %+v", comment, want)
- }
-}
-
-func TestIssuesService_GetComment_invalidOrg(t *testing.T) {
- _, _, err := client.Issues.GetComment("%", "r", 1)
- testURLParseError(t, err)
-}
-
-func TestIssuesService_CreateComment(t *testing.T) {
- setup()
- defer teardown()
-
- input := &IssueComment{Body: String("b")}
-
- mux.HandleFunc("/repos/o/r/issues/1/comments", func(w http.ResponseWriter, r *http.Request) {
- v := new(IssueComment)
- json.NewDecoder(r.Body).Decode(v)
-
- testMethod(t, r, "POST")
- if !reflect.DeepEqual(v, input) {
- t.Errorf("Request body = %+v, want %+v", v, input)
- }
-
- fmt.Fprint(w, `{"id":1}`)
- })
-
- comment, _, err := client.Issues.CreateComment("o", "r", 1, input)
- if err != nil {
- t.Errorf("Issues.CreateComment returned error: %v", err)
- }
-
- want := &IssueComment{ID: Int(1)}
- if !reflect.DeepEqual(comment, want) {
- t.Errorf("Issues.CreateComment returned %+v, want %+v", comment, want)
- }
-}
-
-func TestIssuesService_CreateComment_invalidOrg(t *testing.T) {
- _, _, err := client.Issues.CreateComment("%", "r", 1, nil)
- testURLParseError(t, err)
-}
-
-func TestIssuesService_EditComment(t *testing.T) {
- setup()
- defer teardown()
-
- input := &IssueComment{Body: String("b")}
-
- mux.HandleFunc("/repos/o/r/issues/comments/1", func(w http.ResponseWriter, r *http.Request) {
- v := new(IssueComment)
- json.NewDecoder(r.Body).Decode(v)
-
- testMethod(t, r, "PATCH")
- if !reflect.DeepEqual(v, input) {
- t.Errorf("Request body = %+v, want %+v", v, input)
- }
-
- fmt.Fprint(w, `{"id":1}`)
- })
-
- comment, _, err := client.Issues.EditComment("o", "r", 1, input)
- if err != nil {
- t.Errorf("Issues.EditComment returned error: %v", err)
- }
-
- want := &IssueComment{ID: Int(1)}
- if !reflect.DeepEqual(comment, want) {
- t.Errorf("Issues.EditComment returned %+v, want %+v", comment, want)
- }
-}
-
-func TestIssuesService_EditComment_invalidOwner(t *testing.T) {
- _, _, err := client.Issues.EditComment("%", "r", 1, nil)
- testURLParseError(t, err)
-}
-
-func TestIssuesService_DeleteComment(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/issues/comments/1", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "DELETE")
- })
-
- _, err := client.Issues.DeleteComment("o", "r", 1)
- if err != nil {
- t.Errorf("Issues.DeleteComments returned error: %v", err)
- }
-}
-
-func TestIssuesService_DeleteComment_invalidOwner(t *testing.T) {
- _, err := client.Issues.DeleteComment("%", "r", 1)
- testURLParseError(t, err)
-}
diff --git a/vendor/src/github.com/google/go-github/github/issues_events.go b/vendor/src/github.com/google/go-github/github/issues_events.go
deleted file mode 100644
index 71cf61a..0000000
--- a/vendor/src/github.com/google/go-github/github/issues_events.go
+++ /dev/null
@@ -1,149 +0,0 @@
-// Copyright 2014 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "fmt"
- "time"
-)
-
-// IssueEvent represents an event that occurred around an Issue or Pull Request.
-type IssueEvent struct {
- ID *int `json:"id,omitempty"`
- URL *string `json:"url,omitempty"`
-
- // The User that generated this event.
- Actor *User `json:"actor,omitempty"`
-
- // Event identifies the actual type of Event that occurred. Possible
- // values are:
- //
- // closed
- // The Actor closed the issue.
- // If the issue was closed by commit message, CommitID holds the SHA1 hash of the commit.
- //
- // merged
- // The Actor merged into master a branch containing a commit mentioning the issue.
- // CommitID holds the SHA1 of the merge commit.
- //
- // referenced
- // The Actor committed to master a commit mentioning the issue in its commit message.
- // CommitID holds the SHA1 of the commit.
- //
- // reopened, locked, unlocked
- // The Actor did that to the issue.
- //
- // renamed
- // The Actor changed the issue title from Rename.From to Rename.To.
- //
- // mentioned
- // Someone unspecified @mentioned the Actor [sic] in an issue comment body.
- //
- // assigned, unassigned
- // The Actor assigned the issue to or removed the assignment from the Assignee.
- //
- // labeled, unlabeled
- // The Actor added or removed the Label from the issue.
- //
- // milestoned, demilestoned
- // The Actor added or removed the issue from the Milestone.
- //
- // subscribed, unsubscribed
- // The Actor subscribed to or unsubscribed from notifications for an issue.
- //
- // head_ref_deleted, head_ref_restored
- // The pull request’s branch was deleted or restored.
- //
- Event *string `json:"event,omitempty"`
-
- CreatedAt *time.Time `json:"created_at,omitempty"`
- Issue *Issue `json:"issue,omitempty"`
-
- // Only present on certain events; see above.
- Assignee *User `json:"assignee,omitempty"`
- CommitID *string `json:"commit_id,omitempty"`
- Milestone *Milestone `json:"milestone,omitempty"`
- Label *Label `json:"label,omitempty"`
- Rename *Rename `json:"rename,omitempty"`
-}
-
-// ListIssueEvents lists events for the specified issue.
-//
-// GitHub API docs: https://developer.github.com/v3/issues/events/#list-events-for-an-issue
-func (s *IssuesService) ListIssueEvents(owner, repo string, number int, opt *ListOptions) ([]*IssueEvent, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/issues/%v/events", owner, repo, number)
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- var events []*IssueEvent
- resp, err := s.client.Do(req, &events)
- if err != nil {
- return nil, resp, err
- }
-
- return events, resp, err
-}
-
-// ListRepositoryEvents lists events for the specified repository.
-//
-// GitHub API docs: https://developer.github.com/v3/issues/events/#list-events-for-a-repository
-func (s *IssuesService) ListRepositoryEvents(owner, repo string, opt *ListOptions) ([]*IssueEvent, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/issues/events", owner, repo)
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- var events []*IssueEvent
- resp, err := s.client.Do(req, &events)
- if err != nil {
- return nil, resp, err
- }
-
- return events, resp, err
-}
-
-// GetEvent returns the specified issue event.
-//
-// GitHub API docs: https://developer.github.com/v3/issues/events/#get-a-single-event
-func (s *IssuesService) GetEvent(owner, repo string, id int) (*IssueEvent, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/issues/events/%v", owner, repo, id)
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- event := new(IssueEvent)
- resp, err := s.client.Do(req, event)
- if err != nil {
- return nil, resp, err
- }
-
- return event, resp, err
-}
-
-// Rename contains details for 'renamed' events.
-type Rename struct {
- From *string `json:"from,omitempty"`
- To *string `json:"to,omitempty"`
-}
-
-func (r Rename) String() string {
- return Stringify(r)
-}
diff --git a/vendor/src/github.com/google/go-github/github/issues_events_test.go b/vendor/src/github.com/google/go-github/github/issues_events_test.go
deleted file mode 100644
index 2250432..0000000
--- a/vendor/src/github.com/google/go-github/github/issues_events_test.go
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright 2014 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "fmt"
- "net/http"
- "reflect"
- "testing"
-)
-
-func TestIssuesService_ListIssueEvents(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/issues/1/events", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testFormValues(t, r, values{
- "page": "1",
- "per_page": "2",
- })
- fmt.Fprint(w, `[{"id":1}]`)
- })
-
- opt := &ListOptions{Page: 1, PerPage: 2}
- events, _, err := client.Issues.ListIssueEvents("o", "r", 1, opt)
- if err != nil {
- t.Errorf("Issues.ListIssueEvents returned error: %v", err)
- }
-
- want := []*IssueEvent{{ID: Int(1)}}
- if !reflect.DeepEqual(events, want) {
- t.Errorf("Issues.ListIssueEvents returned %+v, want %+v", events, want)
- }
-}
-
-func TestIssuesService_ListRepositoryEvents(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/issues/events", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testFormValues(t, r, values{
- "page": "1",
- "per_page": "2",
- })
- fmt.Fprint(w, `[{"id":1}]`)
- })
-
- opt := &ListOptions{Page: 1, PerPage: 2}
- events, _, err := client.Issues.ListRepositoryEvents("o", "r", opt)
- if err != nil {
- t.Errorf("Issues.ListRepositoryEvents returned error: %v", err)
- }
-
- want := []*IssueEvent{{ID: Int(1)}}
- if !reflect.DeepEqual(events, want) {
- t.Errorf("Issues.ListRepositoryEvents returned %+v, want %+v", events, want)
- }
-}
-
-func TestIssuesService_GetEvent(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/issues/events/1", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- fmt.Fprint(w, `{"id":1}`)
- })
-
- event, _, err := client.Issues.GetEvent("o", "r", 1)
- if err != nil {
- t.Errorf("Issues.GetEvent returned error: %v", err)
- }
-
- want := &IssueEvent{ID: Int(1)}
- if !reflect.DeepEqual(event, want) {
- t.Errorf("Issues.GetEvent returned %+v, want %+v", event, want)
- }
-}
diff --git a/vendor/src/github.com/google/go-github/github/issues_labels.go b/vendor/src/github.com/google/go-github/github/issues_labels.go
deleted file mode 100644
index c654547..0000000
--- a/vendor/src/github.com/google/go-github/github/issues_labels.go
+++ /dev/null
@@ -1,222 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import "fmt"
-
-// Label represents a GitHub label on an Issue
-type Label struct {
- URL *string `json:"url,omitempty"`
- Name *string `json:"name,omitempty"`
- Color *string `json:"color,omitempty"`
-}
-
-func (l Label) String() string {
- return fmt.Sprint(*l.Name)
-}
-
-// ListLabels lists all labels for a repository.
-//
-// GitHub API docs: http://developer.github.com/v3/issues/labels/#list-all-labels-for-this-repository
-func (s *IssuesService) ListLabels(owner string, repo string, opt *ListOptions) ([]*Label, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/labels", owner, repo)
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- labels := new([]*Label)
- resp, err := s.client.Do(req, labels)
- if err != nil {
- return nil, resp, err
- }
-
- return *labels, resp, err
-}
-
-// GetLabel gets a single label.
-//
-// GitHub API docs: http://developer.github.com/v3/issues/labels/#get-a-single-label
-func (s *IssuesService) GetLabel(owner string, repo string, name string) (*Label, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/labels/%v", owner, repo, name)
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- label := new(Label)
- resp, err := s.client.Do(req, label)
- if err != nil {
- return nil, resp, err
- }
-
- return label, resp, err
-}
-
-// CreateLabel creates a new label on the specified repository.
-//
-// GitHub API docs: http://developer.github.com/v3/issues/labels/#create-a-label
-func (s *IssuesService) CreateLabel(owner string, repo string, label *Label) (*Label, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/labels", owner, repo)
- req, err := s.client.NewRequest("POST", u, label)
- if err != nil {
- return nil, nil, err
- }
-
- l := new(Label)
- resp, err := s.client.Do(req, l)
- if err != nil {
- return nil, resp, err
- }
-
- return l, resp, err
-}
-
-// EditLabel edits a label.
-//
-// GitHub API docs: http://developer.github.com/v3/issues/labels/#update-a-label
-func (s *IssuesService) EditLabel(owner string, repo string, name string, label *Label) (*Label, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/labels/%v", owner, repo, name)
- req, err := s.client.NewRequest("PATCH", u, label)
- if err != nil {
- return nil, nil, err
- }
-
- l := new(Label)
- resp, err := s.client.Do(req, l)
- if err != nil {
- return nil, resp, err
- }
-
- return l, resp, err
-}
-
-// DeleteLabel deletes a label.
-//
-// GitHub API docs: http://developer.github.com/v3/issues/labels/#delete-a-label
-func (s *IssuesService) DeleteLabel(owner string, repo string, name string) (*Response, error) {
- u := fmt.Sprintf("repos/%v/%v/labels/%v", owner, repo, name)
- req, err := s.client.NewRequest("DELETE", u, nil)
- if err != nil {
- return nil, err
- }
- return s.client.Do(req, nil)
-}
-
-// ListLabelsByIssue lists all labels for an issue.
-//
-// GitHub API docs: http://developer.github.com/v3/issues/labels/#list-all-labels-for-this-repository
-func (s *IssuesService) ListLabelsByIssue(owner string, repo string, number int, opt *ListOptions) ([]*Label, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/issues/%d/labels", owner, repo, number)
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- labels := new([]*Label)
- resp, err := s.client.Do(req, labels)
- if err != nil {
- return nil, resp, err
- }
-
- return *labels, resp, err
-}
-
-// AddLabelsToIssue adds labels to an issue.
-//
-// GitHub API docs: http://developer.github.com/v3/issues/labels/#list-all-labels-for-this-repository
-func (s *IssuesService) AddLabelsToIssue(owner string, repo string, number int, labels []string) ([]*Label, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/issues/%d/labels", owner, repo, number)
- req, err := s.client.NewRequest("POST", u, labels)
- if err != nil {
- return nil, nil, err
- }
-
- l := new([]*Label)
- resp, err := s.client.Do(req, l)
- if err != nil {
- return nil, resp, err
- }
-
- return *l, resp, err
-}
-
-// RemoveLabelForIssue removes a label for an issue.
-//
-// GitHub API docs: http://developer.github.com/v3/issues/labels/#remove-a-label-from-an-issue
-func (s *IssuesService) RemoveLabelForIssue(owner string, repo string, number int, label string) (*Response, error) {
- u := fmt.Sprintf("repos/%v/%v/issues/%d/labels/%v", owner, repo, number, label)
- req, err := s.client.NewRequest("DELETE", u, nil)
- if err != nil {
- return nil, err
- }
- return s.client.Do(req, nil)
-}
-
-// ReplaceLabelsForIssue replaces all labels for an issue.
-//
-// GitHub API docs: http://developer.github.com/v3/issues/labels/#replace-all-labels-for-an-issue
-func (s *IssuesService) ReplaceLabelsForIssue(owner string, repo string, number int, labels []string) ([]*Label, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/issues/%d/labels", owner, repo, number)
- req, err := s.client.NewRequest("PUT", u, labels)
- if err != nil {
- return nil, nil, err
- }
-
- l := new([]*Label)
- resp, err := s.client.Do(req, l)
- if err != nil {
- return nil, resp, err
- }
-
- return *l, resp, err
-}
-
-// RemoveLabelsForIssue removes all labels for an issue.
-//
-// GitHub API docs: http://developer.github.com/v3/issues/labels/#remove-all-labels-from-an-issue
-func (s *IssuesService) RemoveLabelsForIssue(owner string, repo string, number int) (*Response, error) {
- u := fmt.Sprintf("repos/%v/%v/issues/%d/labels", owner, repo, number)
- req, err := s.client.NewRequest("DELETE", u, nil)
- if err != nil {
- return nil, err
- }
- return s.client.Do(req, nil)
-}
-
-// ListLabelsForMilestone lists labels for every issue in a milestone.
-//
-// GitHub API docs: http://developer.github.com/v3/issues/labels/#get-labels-for-every-issue-in-a-milestone
-func (s *IssuesService) ListLabelsForMilestone(owner string, repo string, number int, opt *ListOptions) ([]*Label, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/milestones/%d/labels", owner, repo, number)
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- labels := new([]*Label)
- resp, err := s.client.Do(req, labels)
- if err != nil {
- return nil, resp, err
- }
-
- return *labels, resp, err
-}
diff --git a/vendor/src/github.com/google/go-github/github/issues_labels_test.go b/vendor/src/github.com/google/go-github/github/issues_labels_test.go
deleted file mode 100644
index e6ae59b..0000000
--- a/vendor/src/github.com/google/go-github/github/issues_labels_test.go
+++ /dev/null
@@ -1,313 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "encoding/json"
- "fmt"
- "net/http"
- "reflect"
- "testing"
-)
-
-func TestIssuesService_ListLabels(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/labels", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testFormValues(t, r, values{"page": "2"})
- fmt.Fprint(w, `[{"name": "a"},{"name": "b"}]`)
- })
-
- opt := &ListOptions{Page: 2}
- labels, _, err := client.Issues.ListLabels("o", "r", opt)
- if err != nil {
- t.Errorf("Issues.ListLabels returned error: %v", err)
- }
-
- want := []*Label{{Name: String("a")}, {Name: String("b")}}
- if !reflect.DeepEqual(labels, want) {
- t.Errorf("Issues.ListLabels returned %+v, want %+v", labels, want)
- }
-}
-
-func TestIssuesService_ListLabels_invalidOwner(t *testing.T) {
- _, _, err := client.Issues.ListLabels("%", "%", nil)
- testURLParseError(t, err)
-}
-
-func TestIssuesService_GetLabel(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/labels/n", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- fmt.Fprint(w, `{"url":"u", "name": "n", "color": "c"}`)
- })
-
- label, _, err := client.Issues.GetLabel("o", "r", "n")
- if err != nil {
- t.Errorf("Issues.GetLabel returned error: %v", err)
- }
-
- want := &Label{URL: String("u"), Name: String("n"), Color: String("c")}
- if !reflect.DeepEqual(label, want) {
- t.Errorf("Issues.GetLabel returned %+v, want %+v", label, want)
- }
-}
-
-func TestIssuesService_GetLabel_invalidOwner(t *testing.T) {
- _, _, err := client.Issues.GetLabel("%", "%", "%")
- testURLParseError(t, err)
-}
-
-func TestIssuesService_CreateLabel(t *testing.T) {
- setup()
- defer teardown()
-
- input := &Label{Name: String("n")}
-
- mux.HandleFunc("/repos/o/r/labels", func(w http.ResponseWriter, r *http.Request) {
- v := new(Label)
- json.NewDecoder(r.Body).Decode(v)
-
- testMethod(t, r, "POST")
- if !reflect.DeepEqual(v, input) {
- t.Errorf("Request body = %+v, want %+v", v, input)
- }
-
- fmt.Fprint(w, `{"url":"u"}`)
- })
-
- label, _, err := client.Issues.CreateLabel("o", "r", input)
- if err != nil {
- t.Errorf("Issues.CreateLabel returned error: %v", err)
- }
-
- want := &Label{URL: String("u")}
- if !reflect.DeepEqual(label, want) {
- t.Errorf("Issues.CreateLabel returned %+v, want %+v", label, want)
- }
-}
-
-func TestIssuesService_CreateLabel_invalidOwner(t *testing.T) {
- _, _, err := client.Issues.CreateLabel("%", "%", nil)
- testURLParseError(t, err)
-}
-
-func TestIssuesService_EditLabel(t *testing.T) {
- setup()
- defer teardown()
-
- input := &Label{Name: String("z")}
-
- mux.HandleFunc("/repos/o/r/labels/n", func(w http.ResponseWriter, r *http.Request) {
- v := new(Label)
- json.NewDecoder(r.Body).Decode(v)
-
- testMethod(t, r, "PATCH")
- if !reflect.DeepEqual(v, input) {
- t.Errorf("Request body = %+v, want %+v", v, input)
- }
-
- fmt.Fprint(w, `{"url":"u"}`)
- })
-
- label, _, err := client.Issues.EditLabel("o", "r", "n", input)
- if err != nil {
- t.Errorf("Issues.EditLabel returned error: %v", err)
- }
-
- want := &Label{URL: String("u")}
- if !reflect.DeepEqual(label, want) {
- t.Errorf("Issues.EditLabel returned %+v, want %+v", label, want)
- }
-}
-
-func TestIssuesService_EditLabel_invalidOwner(t *testing.T) {
- _, _, err := client.Issues.EditLabel("%", "%", "%", nil)
- testURLParseError(t, err)
-}
-
-func TestIssuesService_DeleteLabel(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/labels/n", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "DELETE")
- })
-
- _, err := client.Issues.DeleteLabel("o", "r", "n")
- if err != nil {
- t.Errorf("Issues.DeleteLabel returned error: %v", err)
- }
-}
-
-func TestIssuesService_DeleteLabel_invalidOwner(t *testing.T) {
- _, err := client.Issues.DeleteLabel("%", "%", "%")
- testURLParseError(t, err)
-}
-
-func TestIssuesService_ListLabelsByIssue(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/issues/1/labels", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testFormValues(t, r, values{"page": "2"})
- fmt.Fprint(w, `[{"name": "a"},{"name": "b"}]`)
- })
-
- opt := &ListOptions{Page: 2}
- labels, _, err := client.Issues.ListLabelsByIssue("o", "r", 1, opt)
- if err != nil {
- t.Errorf("Issues.ListLabelsByIssue returned error: %v", err)
- }
-
- want := []*Label{{Name: String("a")}, {Name: String("b")}}
- if !reflect.DeepEqual(labels, want) {
- t.Errorf("Issues.ListLabelsByIssue returned %+v, want %+v", labels, want)
- }
-}
-
-func TestIssuesService_ListLabelsByIssue_invalidOwner(t *testing.T) {
- _, _, err := client.Issues.ListLabelsByIssue("%", "%", 1, nil)
- testURLParseError(t, err)
-}
-
-func TestIssuesService_AddLabelsToIssue(t *testing.T) {
- setup()
- defer teardown()
-
- input := []string{"a", "b"}
-
- mux.HandleFunc("/repos/o/r/issues/1/labels", func(w http.ResponseWriter, r *http.Request) {
- v := new([]string)
- json.NewDecoder(r.Body).Decode(v)
-
- testMethod(t, r, "POST")
- if !reflect.DeepEqual(*v, input) {
- t.Errorf("Request body = %+v, want %+v", v, input)
- }
-
- fmt.Fprint(w, `[{"url":"u"}]`)
- })
-
- labels, _, err := client.Issues.AddLabelsToIssue("o", "r", 1, input)
- if err != nil {
- t.Errorf("Issues.AddLabelsToIssue returned error: %v", err)
- }
-
- want := []*Label{{URL: String("u")}}
- if !reflect.DeepEqual(labels, want) {
- t.Errorf("Issues.AddLabelsToIssue returned %+v, want %+v", labels, want)
- }
-}
-
-func TestIssuesService_AddLabelsToIssue_invalidOwner(t *testing.T) {
- _, _, err := client.Issues.AddLabelsToIssue("%", "%", 1, nil)
- testURLParseError(t, err)
-}
-
-func TestIssuesService_RemoveLabelForIssue(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/issues/1/labels/l", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "DELETE")
- })
-
- _, err := client.Issues.RemoveLabelForIssue("o", "r", 1, "l")
- if err != nil {
- t.Errorf("Issues.RemoveLabelForIssue returned error: %v", err)
- }
-}
-
-func TestIssuesService_RemoveLabelForIssue_invalidOwner(t *testing.T) {
- _, err := client.Issues.RemoveLabelForIssue("%", "%", 1, "%")
- testURLParseError(t, err)
-}
-
-func TestIssuesService_ReplaceLabelsForIssue(t *testing.T) {
- setup()
- defer teardown()
-
- input := []string{"a", "b"}
-
- mux.HandleFunc("/repos/o/r/issues/1/labels", func(w http.ResponseWriter, r *http.Request) {
- v := new([]string)
- json.NewDecoder(r.Body).Decode(v)
-
- testMethod(t, r, "PUT")
- if !reflect.DeepEqual(*v, input) {
- t.Errorf("Request body = %+v, want %+v", v, input)
- }
-
- fmt.Fprint(w, `[{"url":"u"}]`)
- })
-
- labels, _, err := client.Issues.ReplaceLabelsForIssue("o", "r", 1, input)
- if err != nil {
- t.Errorf("Issues.ReplaceLabelsForIssue returned error: %v", err)
- }
-
- want := []*Label{{URL: String("u")}}
- if !reflect.DeepEqual(labels, want) {
- t.Errorf("Issues.ReplaceLabelsForIssue returned %+v, want %+v", labels, want)
- }
-}
-
-func TestIssuesService_ReplaceLabelsForIssue_invalidOwner(t *testing.T) {
- _, _, err := client.Issues.ReplaceLabelsForIssue("%", "%", 1, nil)
- testURLParseError(t, err)
-}
-
-func TestIssuesService_RemoveLabelsForIssue(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/issues/1/labels", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "DELETE")
- })
-
- _, err := client.Issues.RemoveLabelsForIssue("o", "r", 1)
- if err != nil {
- t.Errorf("Issues.RemoveLabelsForIssue returned error: %v", err)
- }
-}
-
-func TestIssuesService_RemoveLabelsForIssue_invalidOwner(t *testing.T) {
- _, err := client.Issues.RemoveLabelsForIssue("%", "%", 1)
- testURLParseError(t, err)
-}
-
-func TestIssuesService_ListLabelsForMilestone(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/milestones/1/labels", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testFormValues(t, r, values{"page": "2"})
- fmt.Fprint(w, `[{"name": "a"},{"name": "b"}]`)
- })
-
- opt := &ListOptions{Page: 2}
- labels, _, err := client.Issues.ListLabelsForMilestone("o", "r", 1, opt)
- if err != nil {
- t.Errorf("Issues.ListLabelsForMilestone returned error: %v", err)
- }
-
- want := []*Label{{Name: String("a")}, {Name: String("b")}}
- if !reflect.DeepEqual(labels, want) {
- t.Errorf("Issues.ListLabelsForMilestone returned %+v, want %+v", labels, want)
- }
-}
-
-func TestIssuesService_ListLabelsForMilestone_invalidOwner(t *testing.T) {
- _, _, err := client.Issues.ListLabelsForMilestone("%", "%", 1, nil)
- testURLParseError(t, err)
-}
diff --git a/vendor/src/github.com/google/go-github/github/issues_milestones.go b/vendor/src/github.com/google/go-github/github/issues_milestones.go
deleted file mode 100644
index b7621ac..0000000
--- a/vendor/src/github.com/google/go-github/github/issues_milestones.go
+++ /dev/null
@@ -1,146 +0,0 @@
-// Copyright 2014 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "fmt"
- "time"
-)
-
-// Milestone represents a Github repository milestone.
-type Milestone struct {
- URL *string `json:"url,omitempty"`
- HTMLURL *string `json:"html_url,omitempty"`
- LabelsURL *string `json:"labels_url,omitempty"`
- ID *int `json:"id,omitempty"`
- Number *int `json:"number,omitempty"`
- State *string `json:"state,omitempty"`
- Title *string `json:"title,omitempty"`
- Description *string `json:"description,omitempty"`
- Creator *User `json:"creator,omitempty"`
- OpenIssues *int `json:"open_issues,omitempty"`
- ClosedIssues *int `json:"closed_issues,omitempty"`
- CreatedAt *time.Time `json:"created_at,omitempty"`
- UpdatedAt *time.Time `json:"updated_at,omitempty"`
- ClosedAt *time.Time `json:"closed_at,omitempty"`
- DueOn *time.Time `json:"due_on,omitempty"`
-}
-
-func (m Milestone) String() string {
- return Stringify(m)
-}
-
-// MilestoneListOptions specifies the optional parameters to the
-// IssuesService.ListMilestones method.
-type MilestoneListOptions struct {
- // State filters milestones based on their state. Possible values are:
- // open, closed. Default is "open".
- State string `url:"state,omitempty"`
-
- // Sort specifies how to sort milestones. Possible values are: due_date, completeness.
- // Default value is "due_date".
- Sort string `url:"sort,omitempty"`
-
- // Direction in which to sort milestones. Possible values are: asc, desc.
- // Default is "asc".
- Direction string `url:"direction,omitempty"`
-
- ListOptions
-}
-
-// ListMilestones lists all milestones for a repository.
-//
-// GitHub API docs: https://developer.github.com/v3/issues/milestones/#list-milestones-for-a-repository
-func (s *IssuesService) ListMilestones(owner string, repo string, opt *MilestoneListOptions) ([]*Milestone, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/milestones", owner, repo)
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- milestones := new([]*Milestone)
- resp, err := s.client.Do(req, milestones)
- if err != nil {
- return nil, resp, err
- }
-
- return *milestones, resp, err
-}
-
-// GetMilestone gets a single milestone.
-//
-// GitHub API docs: https://developer.github.com/v3/issues/milestones/#get-a-single-milestone
-func (s *IssuesService) GetMilestone(owner string, repo string, number int) (*Milestone, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/milestones/%d", owner, repo, number)
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- milestone := new(Milestone)
- resp, err := s.client.Do(req, milestone)
- if err != nil {
- return nil, resp, err
- }
-
- return milestone, resp, err
-}
-
-// CreateMilestone creates a new milestone on the specified repository.
-//
-// GitHub API docs: https://developer.github.com/v3/issues/milestones/#create-a-milestone
-func (s *IssuesService) CreateMilestone(owner string, repo string, milestone *Milestone) (*Milestone, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/milestones", owner, repo)
- req, err := s.client.NewRequest("POST", u, milestone)
- if err != nil {
- return nil, nil, err
- }
-
- m := new(Milestone)
- resp, err := s.client.Do(req, m)
- if err != nil {
- return nil, resp, err
- }
-
- return m, resp, err
-}
-
-// EditMilestone edits a milestone.
-//
-// GitHub API docs: https://developer.github.com/v3/issues/milestones/#update-a-milestone
-func (s *IssuesService) EditMilestone(owner string, repo string, number int, milestone *Milestone) (*Milestone, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/milestones/%d", owner, repo, number)
- req, err := s.client.NewRequest("PATCH", u, milestone)
- if err != nil {
- return nil, nil, err
- }
-
- m := new(Milestone)
- resp, err := s.client.Do(req, m)
- if err != nil {
- return nil, resp, err
- }
-
- return m, resp, err
-}
-
-// DeleteMilestone deletes a milestone.
-//
-// GitHub API docs: https://developer.github.com/v3/issues/milestones/#delete-a-milestone
-func (s *IssuesService) DeleteMilestone(owner string, repo string, number int) (*Response, error) {
- u := fmt.Sprintf("repos/%v/%v/milestones/%d", owner, repo, number)
- req, err := s.client.NewRequest("DELETE", u, nil)
- if err != nil {
- return nil, err
- }
-
- return s.client.Do(req, nil)
-}
diff --git a/vendor/src/github.com/google/go-github/github/issues_milestones_test.go b/vendor/src/github.com/google/go-github/github/issues_milestones_test.go
deleted file mode 100644
index 11bf4d3..0000000
--- a/vendor/src/github.com/google/go-github/github/issues_milestones_test.go
+++ /dev/null
@@ -1,158 +0,0 @@
-// Copyright 2014 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "encoding/json"
- "fmt"
- "net/http"
- "reflect"
- "testing"
-)
-
-func TestIssuesService_ListMilestones(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/milestones", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testFormValues(t, r, values{
- "state": "closed",
- "sort": "due_date",
- "direction": "asc",
- "page": "2",
- })
- fmt.Fprint(w, `[{"number":1}]`)
- })
-
- opt := &MilestoneListOptions{"closed", "due_date", "asc", ListOptions{Page: 2}}
- milestones, _, err := client.Issues.ListMilestones("o", "r", opt)
- if err != nil {
- t.Errorf("IssuesService.ListMilestones returned error: %v", err)
- }
-
- want := []*Milestone{{Number: Int(1)}}
- if !reflect.DeepEqual(milestones, want) {
- t.Errorf("IssuesService.ListMilestones returned %+v, want %+v", milestones, want)
- }
-}
-
-func TestIssuesService_ListMilestones_invalidOwner(t *testing.T) {
- _, _, err := client.Issues.ListMilestones("%", "r", nil)
- testURLParseError(t, err)
-}
-
-func TestIssuesService_GetMilestone(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/milestones/1", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- fmt.Fprint(w, `{"number":1}`)
- })
-
- milestone, _, err := client.Issues.GetMilestone("o", "r", 1)
- if err != nil {
- t.Errorf("IssuesService.GetMilestone returned error: %v", err)
- }
-
- want := &Milestone{Number: Int(1)}
- if !reflect.DeepEqual(milestone, want) {
- t.Errorf("IssuesService.GetMilestone returned %+v, want %+v", milestone, want)
- }
-}
-
-func TestIssuesService_GetMilestone_invalidOwner(t *testing.T) {
- _, _, err := client.Issues.GetMilestone("%", "r", 1)
- testURLParseError(t, err)
-}
-
-func TestIssuesService_CreateMilestone(t *testing.T) {
- setup()
- defer teardown()
-
- input := &Milestone{Title: String("t")}
-
- mux.HandleFunc("/repos/o/r/milestones", func(w http.ResponseWriter, r *http.Request) {
- v := new(Milestone)
- json.NewDecoder(r.Body).Decode(v)
-
- testMethod(t, r, "POST")
- if !reflect.DeepEqual(v, input) {
- t.Errorf("Request body = %+v, want %+v", v, input)
- }
-
- fmt.Fprint(w, `{"number":1}`)
- })
-
- milestone, _, err := client.Issues.CreateMilestone("o", "r", input)
- if err != nil {
- t.Errorf("IssuesService.CreateMilestone returned error: %v", err)
- }
-
- want := &Milestone{Number: Int(1)}
- if !reflect.DeepEqual(milestone, want) {
- t.Errorf("IssuesService.CreateMilestone returned %+v, want %+v", milestone, want)
- }
-}
-
-func TestIssuesService_CreateMilestone_invalidOwner(t *testing.T) {
- _, _, err := client.Issues.CreateMilestone("%", "r", nil)
- testURLParseError(t, err)
-}
-
-func TestIssuesService_EditMilestone(t *testing.T) {
- setup()
- defer teardown()
-
- input := &Milestone{Title: String("t")}
-
- mux.HandleFunc("/repos/o/r/milestones/1", func(w http.ResponseWriter, r *http.Request) {
- v := new(Milestone)
- json.NewDecoder(r.Body).Decode(v)
-
- testMethod(t, r, "PATCH")
- if !reflect.DeepEqual(v, input) {
- t.Errorf("Request body = %+v, want %+v", v, input)
- }
-
- fmt.Fprint(w, `{"number":1}`)
- })
-
- milestone, _, err := client.Issues.EditMilestone("o", "r", 1, input)
- if err != nil {
- t.Errorf("IssuesService.EditMilestone returned error: %v", err)
- }
-
- want := &Milestone{Number: Int(1)}
- if !reflect.DeepEqual(milestone, want) {
- t.Errorf("IssuesService.EditMilestone returned %+v, want %+v", milestone, want)
- }
-}
-
-func TestIssuesService_EditMilestone_invalidOwner(t *testing.T) {
- _, _, err := client.Issues.EditMilestone("%", "r", 1, nil)
- testURLParseError(t, err)
-}
-
-func TestIssuesService_DeleteMilestone(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/milestones/1", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "DELETE")
- })
-
- _, err := client.Issues.DeleteMilestone("o", "r", 1)
- if err != nil {
- t.Errorf("IssuesService.DeleteMilestone returned error: %v", err)
- }
-}
-
-func TestIssuesService_DeleteMilestone_invalidOwner(t *testing.T) {
- _, err := client.Issues.DeleteMilestone("%", "r", 1)
- testURLParseError(t, err)
-}
diff --git a/vendor/src/github.com/google/go-github/github/issues_test.go b/vendor/src/github.com/google/go-github/github/issues_test.go
deleted file mode 100644
index 5e6369e..0000000
--- a/vendor/src/github.com/google/go-github/github/issues_test.go
+++ /dev/null
@@ -1,276 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "encoding/json"
- "fmt"
- "net/http"
- "reflect"
- "testing"
- "time"
-)
-
-func TestIssuesService_List_all(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/issues", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testHeader(t, r, "Accept", mediaTypeReactionsPreview)
- testFormValues(t, r, values{
- "filter": "all",
- "state": "closed",
- "labels": "a,b",
- "sort": "updated",
- "direction": "asc",
- "since": "2002-02-10T15:30:00Z",
- "page": "1",
- "per_page": "2",
- })
- fmt.Fprint(w, `[{"number":1}]`)
- })
-
- opt := &IssueListOptions{
- "all", "closed", []string{"a", "b"}, "updated", "asc",
- time.Date(2002, time.February, 10, 15, 30, 0, 0, time.UTC),
- ListOptions{Page: 1, PerPage: 2},
- }
- issues, _, err := client.Issues.List(true, opt)
- if err != nil {
- t.Errorf("Issues.List returned error: %v", err)
- }
-
- want := []*Issue{{Number: Int(1)}}
- if !reflect.DeepEqual(issues, want) {
- t.Errorf("Issues.List returned %+v, want %+v", issues, want)
- }
-}
-
-func TestIssuesService_List_owned(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/user/issues", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testHeader(t, r, "Accept", mediaTypeReactionsPreview)
- fmt.Fprint(w, `[{"number":1}]`)
- })
-
- issues, _, err := client.Issues.List(false, nil)
- if err != nil {
- t.Errorf("Issues.List returned error: %v", err)
- }
-
- want := []*Issue{{Number: Int(1)}}
- if !reflect.DeepEqual(issues, want) {
- t.Errorf("Issues.List returned %+v, want %+v", issues, want)
- }
-}
-
-func TestIssuesService_ListByOrg(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/orgs/o/issues", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testHeader(t, r, "Accept", mediaTypeReactionsPreview)
- fmt.Fprint(w, `[{"number":1}]`)
- })
-
- issues, _, err := client.Issues.ListByOrg("o", nil)
- if err != nil {
- t.Errorf("Issues.ListByOrg returned error: %v", err)
- }
-
- want := []*Issue{{Number: Int(1)}}
- if !reflect.DeepEqual(issues, want) {
- t.Errorf("Issues.List returned %+v, want %+v", issues, want)
- }
-}
-
-func TestIssuesService_ListByOrg_invalidOrg(t *testing.T) {
- _, _, err := client.Issues.ListByOrg("%", nil)
- testURLParseError(t, err)
-}
-
-func TestIssuesService_ListByRepo(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/issues", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testHeader(t, r, "Accept", mediaTypeReactionsPreview)
- testFormValues(t, r, values{
- "milestone": "*",
- "state": "closed",
- "assignee": "a",
- "creator": "c",
- "mentioned": "m",
- "labels": "a,b",
- "sort": "updated",
- "direction": "asc",
- "since": "2002-02-10T15:30:00Z",
- })
- fmt.Fprint(w, `[{"number":1}]`)
- })
-
- opt := &IssueListByRepoOptions{
- "*", "closed", "a", "c", "m", []string{"a", "b"}, "updated", "asc",
- time.Date(2002, time.February, 10, 15, 30, 0, 0, time.UTC),
- ListOptions{0, 0},
- }
- issues, _, err := client.Issues.ListByRepo("o", "r", opt)
- if err != nil {
- t.Errorf("Issues.ListByOrg returned error: %v", err)
- }
-
- want := []*Issue{{Number: Int(1)}}
- if !reflect.DeepEqual(issues, want) {
- t.Errorf("Issues.List returned %+v, want %+v", issues, want)
- }
-}
-
-func TestIssuesService_ListByRepo_invalidOwner(t *testing.T) {
- _, _, err := client.Issues.ListByRepo("%", "r", nil)
- testURLParseError(t, err)
-}
-
-func TestIssuesService_Get(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/issues/1", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testHeader(t, r, "Accept", mediaTypeReactionsPreview)
- fmt.Fprint(w, `{"number":1, "labels": [{"url": "u", "name": "n", "color": "c"}]}`)
- })
-
- issue, _, err := client.Issues.Get("o", "r", 1)
- if err != nil {
- t.Errorf("Issues.Get returned error: %v", err)
- }
-
- want := &Issue{
- Number: Int(1),
- Labels: []Label{{
- URL: String("u"),
- Name: String("n"),
- Color: String("c"),
- }},
- }
- if !reflect.DeepEqual(issue, want) {
- t.Errorf("Issues.Get returned %+v, want %+v", issue, want)
- }
-}
-
-func TestIssuesService_Get_invalidOwner(t *testing.T) {
- _, _, err := client.Issues.Get("%", "r", 1)
- testURLParseError(t, err)
-}
-
-func TestIssuesService_Create(t *testing.T) {
- setup()
- defer teardown()
-
- input := &IssueRequest{
- Title: String("t"),
- Body: String("b"),
- Assignee: String("a"),
- Labels: &[]string{"l1", "l2"},
- }
-
- mux.HandleFunc("/repos/o/r/issues", func(w http.ResponseWriter, r *http.Request) {
- v := new(IssueRequest)
- json.NewDecoder(r.Body).Decode(v)
-
- testMethod(t, r, "POST")
- if !reflect.DeepEqual(v, input) {
- t.Errorf("Request body = %+v, want %+v", v, input)
- }
-
- fmt.Fprint(w, `{"number":1}`)
- })
-
- issue, _, err := client.Issues.Create("o", "r", input)
- if err != nil {
- t.Errorf("Issues.Create returned error: %v", err)
- }
-
- want := &Issue{Number: Int(1)}
- if !reflect.DeepEqual(issue, want) {
- t.Errorf("Issues.Create returned %+v, want %+v", issue, want)
- }
-}
-
-func TestIssuesService_Create_invalidOwner(t *testing.T) {
- _, _, err := client.Issues.Create("%", "r", nil)
- testURLParseError(t, err)
-}
-
-func TestIssuesService_Edit(t *testing.T) {
- setup()
- defer teardown()
-
- input := &IssueRequest{Title: String("t")}
-
- mux.HandleFunc("/repos/o/r/issues/1", func(w http.ResponseWriter, r *http.Request) {
- v := new(IssueRequest)
- json.NewDecoder(r.Body).Decode(v)
-
- testMethod(t, r, "PATCH")
- if !reflect.DeepEqual(v, input) {
- t.Errorf("Request body = %+v, want %+v", v, input)
- }
-
- fmt.Fprint(w, `{"number":1}`)
- })
-
- issue, _, err := client.Issues.Edit("o", "r", 1, input)
- if err != nil {
- t.Errorf("Issues.Edit returned error: %v", err)
- }
-
- want := &Issue{Number: Int(1)}
- if !reflect.DeepEqual(issue, want) {
- t.Errorf("Issues.Edit returned %+v, want %+v", issue, want)
- }
-}
-
-func TestIssuesService_Edit_invalidOwner(t *testing.T) {
- _, _, err := client.Issues.Edit("%", "r", 1, nil)
- testURLParseError(t, err)
-}
-
-func TestIssuesService_Lock(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/issues/1/lock", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "PUT")
-
- w.WriteHeader(http.StatusNoContent)
- })
-
- if _, err := client.Issues.Lock("o", "r", 1); err != nil {
- t.Errorf("Issues.Lock returned error: %v", err)
- }
-}
-
-func TestIssuesService_Unlock(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/issues/1/lock", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "DELETE")
-
- w.WriteHeader(http.StatusNoContent)
- })
-
- if _, err := client.Issues.Unlock("o", "r", 1); err != nil {
- t.Errorf("Issues.Unlock returned error: %v", err)
- }
-}
diff --git a/vendor/src/github.com/google/go-github/github/issues_timeline.go b/vendor/src/github.com/google/go-github/github/issues_timeline.go
deleted file mode 100644
index d20eef8..0000000
--- a/vendor/src/github.com/google/go-github/github/issues_timeline.go
+++ /dev/null
@@ -1,148 +0,0 @@
-// Copyright 2016 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "fmt"
- "time"
-)
-
-// Timeline represents an event that occurred around an Issue or Pull Request.
-//
-// It is similar to an IssueEvent but may contain more information.
-// GitHub API docs: https://developer.github.com/v3/issues/timeline/
-type Timeline struct {
- ID *int `json:"id,omitempty"`
- URL *string `json:"url,omitempty"`
- CommitURL *string `json:"commit_url,omitempty"`
-
- // The User object that generated the event.
- Actor *User `json:"actor,omitempty"`
-
- // Event identifies the actual type of Event that occurred. Possible values
- // are:
- //
- // assigned
- // The issue was assigned to the assignee.
- //
- // closed
- // The issue was closed by the actor. When the commit_id is present, it
- // identifies the commit that closed the issue using "closes / fixes #NN"
- // syntax.
- //
- // commented
- // A comment was added to the issue.
- //
- // committed
- // A commit was added to the pull request's 'HEAD' branch. Only provided
- // for pull requests.
- //
- // cross-referenced
- // The issue was referenced from another issue. The 'source' attribute
- // contains the 'id', 'actor', and 'url' of the reference's source.
- //
- // demilestoned
- // The issue was removed from a milestone.
- //
- // head_ref_deleted
- // The pull request's branch was deleted.
- //
- // head_ref_restored
- // The pull request's branch was restored.
- //
- // labeled
- // A label was added to the issue.
- //
- // locked
- // The issue was locked by the actor.
- //
- // mentioned
- // The actor was @mentioned in an issue body.
- //
- // merged
- // The issue was merged by the actor. The 'commit_id' attribute is the
- // SHA1 of the HEAD commit that was merged.
- //
- // milestoned
- // The issue was added to a milestone.
- //
- // referenced
- // The issue was referenced from a commit message. The 'commit_id'
- // attribute is the commit SHA1 of where that happened.
- //
- // renamed
- // The issue title was changed.
- //
- // reopened
- // The issue was reopened by the actor.
- //
- // subscribed
- // The actor subscribed to receive notifications for an issue.
- //
- // unassigned
- // The assignee was unassigned from the issue.
- //
- // unlabeled
- // A label was removed from the issue.
- //
- // unlocked
- // The issue was unlocked by the actor.
- //
- // unsubscribed
- // The actor unsubscribed to stop receiving notifications for an issue.
- //
- Event *string `json:"event,omitempty"`
-
- // The string SHA of a commit that referenced this Issue or Pull Request.
- CommitID *string `json:"commit_id,omitempty"`
- // The timestamp indicating when the event occurred.
- CreatedAt *time.Time `json:"created_at,omitempty"`
- // The Label object including `name` and `color` attributes. Only provided for
- // 'labeled' and 'unlabeled' events.
- Label *Label `json:"label,omitempty"`
- // The User object which was assigned to (or unassigned from) this Issue or
- // Pull Request. Only provided for 'assigned' and 'unassigned' events.
- Assignee *User `json:"assignee,omitempty"`
- // The Milestone object including a 'title' attribute.
- // Only provided for 'milestoned' and 'demilestoned' events.
- Milestone *Milestone `json:"milestone,omitempty"`
- // The 'id', 'actor', and 'url' for the source of a reference from another issue.
- // Only provided for 'cross-referenced' events.
- Source *Source `json:"source,omitempty"`
- // An object containing rename details including 'from' and 'to' attributes.
- // Only provided for 'renamed' events.
- Rename *Rename `json:"rename,omitempty"`
-}
-
-// Source represents a reference's source.
-type Source struct {
- ID *int `json:"id,omitempty"`
- URL *string `json:"url,omitempty"`
- Actor *User `json:"actor,omitempty"`
-}
-
-// ListIssueTimeline lists events for the specified issue.
-//
-// GitHub API docs: https://developer.github.com/v3/issues/timeline/#list-events-for-an-issue
-func (s *IssuesService) ListIssueTimeline(owner, repo string, number int, opt *ListOptions) ([]*Timeline, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/issues/%v/timeline", owner, repo, number)
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeTimelinePreview)
-
- var events []*Timeline
- resp, err := s.client.Do(req, &events)
- return events, resp, err
-}
diff --git a/vendor/src/github.com/google/go-github/github/issues_timeline_test.go b/vendor/src/github.com/google/go-github/github/issues_timeline_test.go
deleted file mode 100644
index fb67fd6..0000000
--- a/vendor/src/github.com/google/go-github/github/issues_timeline_test.go
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2016 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "fmt"
- "net/http"
- "reflect"
- "testing"
-)
-
-func TestIssuesService_ListIssueTimeline(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/issues/1/timeline", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testHeader(t, r, "Accept", mediaTypeTimelinePreview)
- testFormValues(t, r, values{
- "page": "1",
- "per_page": "2",
- })
- fmt.Fprint(w, `[{"id":1}]`)
- })
-
- opt := &ListOptions{Page: 1, PerPage: 2}
- events, _, err := client.Issues.ListIssueTimeline("o", "r", 1, opt)
- if err != nil {
- t.Errorf("Issues.ListIssueTimeline returned error: %v", err)
- }
-
- want := []*Timeline{{ID: Int(1)}}
- if !reflect.DeepEqual(events, want) {
- t.Errorf("Issues.ListIssueTimeline = %+v, want %+v", events, want)
- }
-}
diff --git a/vendor/src/github.com/google/go-github/github/licenses.go b/vendor/src/github.com/google/go-github/github/licenses.go
deleted file mode 100644
index 35cd234..0000000
--- a/vendor/src/github.com/google/go-github/github/licenses.go
+++ /dev/null
@@ -1,79 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import "fmt"
-
-// LicensesService handles communication with the license related
-// methods of the GitHub API.
-//
-// GitHub API docs: http://developer.github.com/v3/pulls/
-type LicensesService service
-
-// License represents an open source license.
-type License struct {
- Key *string `json:"key,omitempty"`
- Name *string `json:"name,omitempty"`
- URL *string `json:"url,omitempty"`
-
- HTMLURL *string `json:"html_url,omitempty"`
- Featured *bool `json:"featured,omitempty"`
- Description *string `json:"description,omitempty"`
- Category *string `json:"category,omitempty"`
- Implementation *string `json:"implementation,omitempty"`
- Required *[]string `json:"required,omitempty"`
- Permitted *[]string `json:"permitted,omitempty"`
- Forbidden *[]string `json:"forbidden,omitempty"`
- Body *string `json:"body,omitempty"`
-}
-
-func (l License) String() string {
- return Stringify(l)
-}
-
-// List popular open source licenses.
-//
-// GitHub API docs: https://developer.github.com/v3/licenses/#list-all-licenses
-func (s *LicensesService) List() ([]*License, *Response, error) {
- req, err := s.client.NewRequest("GET", "licenses", nil)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches
- req.Header.Set("Accept", mediaTypeLicensesPreview)
-
- licenses := new([]*License)
- resp, err := s.client.Do(req, licenses)
- if err != nil {
- return nil, resp, err
- }
-
- return *licenses, resp, err
-}
-
-// Get extended metadata for one license.
-//
-// GitHub API docs: https://developer.github.com/v3/licenses/#get-an-individual-license
-func (s *LicensesService) Get(licenseName string) (*License, *Response, error) {
- u := fmt.Sprintf("licenses/%s", licenseName)
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches
- req.Header.Set("Accept", mediaTypeLicensesPreview)
-
- license := new(License)
- resp, err := s.client.Do(req, license)
- if err != nil {
- return nil, resp, err
- }
-
- return license, resp, err
-}
diff --git a/vendor/src/github.com/google/go-github/github/licenses_test.go b/vendor/src/github.com/google/go-github/github/licenses_test.go
deleted file mode 100644
index 2319bb4..0000000
--- a/vendor/src/github.com/google/go-github/github/licenses_test.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "fmt"
- "net/http"
- "reflect"
- "testing"
-)
-
-func TestLicensesService_List(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/licenses", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testHeader(t, r, "Accept", mediaTypeLicensesPreview)
- fmt.Fprint(w, `[{"key":"mit","name":"MIT","url":"https://api.github.com/licenses/mit"}]`)
- })
-
- licenses, _, err := client.Licenses.List()
- if err != nil {
- t.Errorf("Licenses.List returned error: %v", err)
- }
-
- want := []*License{{
- Key: String("mit"),
- Name: String("MIT"),
- URL: String("https://api.github.com/licenses/mit"),
- }}
- if !reflect.DeepEqual(licenses, want) {
- t.Errorf("Licenses.List returned %+v, want %+v", licenses, want)
- }
-}
-
-func TestLicensesService_Get(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/licenses/mit", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testHeader(t, r, "Accept", mediaTypeLicensesPreview)
- fmt.Fprint(w, `{"key":"mit","name":"MIT"}`)
- })
-
- license, _, err := client.Licenses.Get("mit")
- if err != nil {
- t.Errorf("Licenses.Get returned error: %v", err)
- }
-
- want := &License{Key: String("mit"), Name: String("MIT")}
- if !reflect.DeepEqual(license, want) {
- t.Errorf("Licenses.Get returned %+v, want %+v", license, want)
- }
-}
-
-func TestLicensesService_Get_invalidTemplate(t *testing.T) {
- _, _, err := client.Licenses.Get("%")
- testURLParseError(t, err)
-}
diff --git a/vendor/src/github.com/google/go-github/github/messages.go b/vendor/src/github.com/google/go-github/github/messages.go
deleted file mode 100644
index 9f0aba9..0000000
--- a/vendor/src/github.com/google/go-github/github/messages.go
+++ /dev/null
@@ -1,119 +0,0 @@
-// Copyright 2016 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file provides functions for validating payloads from GitHub Webhooks.
-// GitHub docs: https://developer.github.com/webhooks/securing/#validating-payloads-from-github
-
-package github
-
-import (
- "crypto/hmac"
- "crypto/sha1"
- "crypto/sha256"
- "crypto/sha512"
- "encoding/hex"
- "errors"
- "fmt"
- "hash"
- "io/ioutil"
- "net/http"
- "strings"
-)
-
-const (
- // sha1Prefix is the prefix used by GitHub before the HMAC hexdigest.
- sha1Prefix = "sha1"
- // sha256Prefix and sha512Prefix are provided for future compatibility.
- sha256Prefix = "sha256"
- sha512Prefix = "sha512"
- // signatureHeader is the GitHub header key used to pass the HMAC hexdigest.
- signatureHeader = "X-Hub-Signature"
-)
-
-// genMAC generates the HMAC signature for a message provided the secret key
-// and hashFunc.
-func genMAC(message, key []byte, hashFunc func() hash.Hash) []byte {
- mac := hmac.New(hashFunc, key)
- mac.Write(message)
- return mac.Sum(nil)
-}
-
-// checkMAC reports whether messageMAC is a valid HMAC tag for message.
-func checkMAC(message, messageMAC, key []byte, hashFunc func() hash.Hash) bool {
- expectedMAC := genMAC(message, key, hashFunc)
- return hmac.Equal(messageMAC, expectedMAC)
-}
-
-// messageMAC returns the hex-decoded HMAC tag from the signature and its
-// corresponding hash function.
-func messageMAC(signature string) ([]byte, func() hash.Hash, error) {
- if signature == "" {
- return nil, nil, errors.New("missing signature")
- }
- sigParts := strings.SplitN(signature, "=", 2)
- if len(sigParts) != 2 {
- return nil, nil, fmt.Errorf("error parsing signature %q", signature)
- }
-
- var hashFunc func() hash.Hash
- switch sigParts[0] {
- case sha1Prefix:
- hashFunc = sha1.New
- case sha256Prefix:
- hashFunc = sha256.New
- case sha512Prefix:
- hashFunc = sha512.New
- default:
- return nil, nil, fmt.Errorf("unknown hash type prefix: %q", sigParts[0])
- }
-
- buf, err := hex.DecodeString(sigParts[1])
- if err != nil {
- return nil, nil, fmt.Errorf("error decoding signature %q: %v", signature, err)
- }
- return buf, hashFunc, nil
-}
-
-// ValidatePayload validates an incoming GitHub Webhook event request
-// and returns the (JSON) payload.
-// secretKey is the GitHub Webhook secret message.
-//
-// Example usage:
-//
-// func (s *GitHubEventMonitor) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-// payload, err := github.ValidatePayload(r, s.webhookSecretKey)
-// if err != nil { ... }
-// // Process payload...
-// }
-//
-func ValidatePayload(r *http.Request, secretKey []byte) (payload []byte, err error) {
- payload, err = ioutil.ReadAll(r.Body)
- if err != nil {
- return nil, err
- }
-
- sig := r.Header.Get(signatureHeader)
- if err := validateSignature(sig, payload, secretKey); err != nil {
- return nil, err
- }
- return payload, nil
-}
-
-// validateSignature validates the signature for the given payload.
-// signature is the GitHub hash signature delivered in the X-Hub-Signature header.
-// payload is the JSON payload sent by GitHub Webhooks.
-// secretKey is the GitHub Webhook secret message.
-//
-// GitHub docs: https://developer.github.com/webhooks/securing/#validating-payloads-from-github
-func validateSignature(signature string, payload, secretKey []byte) error {
- messageMAC, hashFunc, err := messageMAC(signature)
- if err != nil {
- return err
- }
- if !checkMAC(payload, messageMAC, secretKey, hashFunc) {
- return errors.New("payload signature check failed")
- }
- return nil
-}
diff --git a/vendor/src/github.com/google/go-github/github/messages_test.go b/vendor/src/github.com/google/go-github/github/messages_test.go
deleted file mode 100644
index 5373b6a..0000000
--- a/vendor/src/github.com/google/go-github/github/messages_test.go
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright 2016 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "bytes"
- "net/http"
- "testing"
-)
-
-func TestValidatePayload(t *testing.T) {
- const defaultBody = `{"yo":true}` // All tests below use the default request body and signature.
- const defaultSignature = "sha1=126f2c800419c60137ce748d7672e77b65cf16d6"
- secretKey := []byte("0123456789abcdef")
- tests := []struct {
- signature string
- eventID string
- event string
- wantEventID string
- wantEvent string
- wantPayload string
- }{
- // The following tests generate expected errors:
- {}, // Missing signature
- {signature: "yo"}, // Missing signature prefix
- {signature: "sha1=yo"}, // Signature not hex string
- {signature: "sha1=012345"}, // Invalid signature
- // The following tests expect err=nil:
- {
- signature: defaultSignature,
- eventID: "dead-beef",
- event: "ping",
- wantEventID: "dead-beef",
- wantEvent: "ping",
- wantPayload: defaultBody,
- },
- {
- signature: defaultSignature,
- event: "ping",
- wantEvent: "ping",
- wantPayload: defaultBody,
- },
- {
- signature: "sha256=b1f8020f5b4cd42042f807dd939015c4a418bc1ff7f604dd55b0a19b5d953d9b",
- event: "ping",
- wantEvent: "ping",
- wantPayload: defaultBody,
- },
- {
- signature: "sha512=8456767023c1195682e182a23b3f5d19150ecea598fde8cb85918f7281b16079471b1329f92b912c4d8bd7455cb159777db8f29608b20c7c87323ba65ae62e1f",
- event: "ping",
- wantEvent: "ping",
- wantPayload: defaultBody,
- },
- }
-
- for _, test := range tests {
- buf := bytes.NewBufferString(defaultBody)
- req, err := http.NewRequest("GET", "http://localhost/event", buf)
- if err != nil {
- t.Fatalf("NewRequest: %v", err)
- }
- if test.signature != "" {
- req.Header.Set(signatureHeader, test.signature)
- }
-
- got, err := ValidatePayload(req, secretKey)
- if err != nil {
- if test.wantPayload != "" {
- t.Errorf("ValidatePayload(%#v): err = %v, want nil", test, err)
- }
- continue
- }
- if string(got) != test.wantPayload {
- t.Errorf("ValidatePayload = %q, want %q", got, test.wantPayload)
- }
- }
-}
diff --git a/vendor/src/github.com/google/go-github/github/migrations.go b/vendor/src/github.com/google/go-github/github/migrations.go
deleted file mode 100644
index a7890b0..0000000
--- a/vendor/src/github.com/google/go-github/github/migrations.go
+++ /dev/null
@@ -1,223 +0,0 @@
-// Copyright 2016 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "errors"
- "fmt"
- "net/http"
- "strings"
-)
-
-// MigrationService provides access to the migration related functions
-// in the GitHub API.
-//
-// GitHub API docs: https://developer.github.com/v3/migration/
-type MigrationService service
-
-// Migration represents a GitHub migration (archival).
-type Migration struct {
- ID *int `json:"id,omitempty"`
- GUID *string `json:"guid,omitempty"`
- // State is the current state of a migration.
- // Possible values are:
- // "pending" which means the migration hasn't started yet,
- // "exporting" which means the migration is in progress,
- // "exported" which means the migration finished successfully, or
- // "failed" which means the migration failed.
- State *string `json:"state,omitempty"`
- // LockRepositories indicates whether repositories are locked (to prevent
- // manipulation) while migrating data.
- LockRepositories *bool `json:"lock_repositories,omitempty"`
- // ExcludeAttachments indicates whether attachments should be excluded from
- // the migration (to reduce migration archive file size).
- ExcludeAttachments *bool `json:"exclude_attachments,omitempty"`
- URL *string `json:"url,omitempty"`
- CreatedAt *string `json:"created_at,omitempty"`
- UpdatedAt *string `json:"updated_at,omitempty"`
- Repositories []*Repository `json:"repositories,omitempty"`
-}
-
-func (m Migration) String() string {
- return Stringify(m)
-}
-
-// MigrationOptions specifies the optional parameters to Migration methods.
-type MigrationOptions struct {
- // LockRepositories indicates whether repositories should be locked (to prevent
- // manipulation) while migrating data.
- LockRepositories bool
-
- // ExcludeAttachments indicates whether attachments should be excluded from
- // the migration (to reduce migration archive file size).
- ExcludeAttachments bool
-}
-
-// startMigration represents the body of a StartMigration request.
-type startMigration struct {
- // Repositories is a slice of repository names to migrate.
- Repositories []string `json:"repositories,omitempty"`
-
- // LockRepositories indicates whether repositories should be locked (to prevent
- // manipulation) while migrating data.
- LockRepositories *bool `json:"lock_repositories,omitempty"`
-
- // ExcludeAttachments indicates whether attachments should be excluded from
- // the migration (to reduce migration archive file size).
- ExcludeAttachments *bool `json:"exclude_attachments,omitempty"`
-}
-
-// StartMigration starts the generation of a migration archive.
-// repos is a slice of repository names to migrate.
-//
-// GitHub API docs: https://developer.github.com/v3/migration/migrations/#start-a-migration
-func (s *MigrationService) StartMigration(org string, repos []string, opt *MigrationOptions) (*Migration, *Response, error) {
- u := fmt.Sprintf("orgs/%v/migrations", org)
-
- body := &startMigration{Repositories: repos}
- if opt != nil {
- body.LockRepositories = Bool(opt.LockRepositories)
- body.ExcludeAttachments = Bool(opt.ExcludeAttachments)
- }
-
- req, err := s.client.NewRequest("POST", u, body)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeMigrationsPreview)
-
- m := &Migration{}
- resp, err := s.client.Do(req, m)
- if err != nil {
- return nil, resp, err
- }
-
- return m, resp, nil
-}
-
-// ListMigrations lists the most recent migrations.
-//
-// GitHub API docs: https://developer.github.com/v3/migration/migrations/#get-a-list-of-migrations
-func (s *MigrationService) ListMigrations(org string) ([]*Migration, *Response, error) {
- u := fmt.Sprintf("orgs/%v/migrations", org)
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeMigrationsPreview)
-
- var m []*Migration
- resp, err := s.client.Do(req, &m)
- if err != nil {
- return nil, resp, err
- }
-
- return m, resp, nil
-}
-
-// MigrationStatus gets the status of a specific migration archive.
-// id is the migration ID.
-//
-// GitHub API docs: https://developer.github.com/v3/migration/migrations/#get-the-status-of-a-migration
-func (s *MigrationService) MigrationStatus(org string, id int) (*Migration, *Response, error) {
- u := fmt.Sprintf("orgs/%v/migrations/%v", org, id)
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeMigrationsPreview)
-
- m := &Migration{}
- resp, err := s.client.Do(req, m)
- if err != nil {
- return nil, resp, err
- }
-
- return m, resp, nil
-}
-
-// MigrationArchiveURL fetches a migration archive URL.
-// id is the migration ID.
-//
-// GitHub API docs: https://developer.github.com/v3/migration/migrations/#download-a-migration-archive
-func (s *MigrationService) MigrationArchiveURL(org string, id int) (url string, err error) {
- u := fmt.Sprintf("orgs/%v/migrations/%v/archive", org, id)
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return "", err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeMigrationsPreview)
-
- s.client.clientMu.Lock()
- defer s.client.clientMu.Unlock()
-
- // Disable the redirect mechanism because AWS fails if the GitHub auth token is provided.
- var loc string
- saveRedirect := s.client.client.CheckRedirect
- s.client.client.CheckRedirect = func(req *http.Request, via []*http.Request) error {
- loc = req.URL.String()
- return errors.New("disable redirect")
- }
- defer func() { s.client.client.CheckRedirect = saveRedirect }()
-
- _, err = s.client.Do(req, nil) // expect error from disable redirect
- if err == nil {
- return "", errors.New("expected redirect, none provided")
- }
- if !strings.Contains(err.Error(), "disable redirect") {
- return "", err
- }
- return loc, nil
-}
-
-// DeleteMigration deletes a previous migration archive.
-// id is the migration ID.
-//
-// GitHub API docs: https://developer.github.com/v3/migration/migrations/#delete-a-migration-archive
-func (s *MigrationService) DeleteMigration(org string, id int) (*Response, error) {
- u := fmt.Sprintf("orgs/%v/migrations/%v/archive", org, id)
-
- req, err := s.client.NewRequest("DELETE", u, nil)
- if err != nil {
- return nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeMigrationsPreview)
-
- return s.client.Do(req, nil)
-}
-
-// UnlockRepo unlocks a repository that was locked for migration.
-// id is the migration ID.
-// You should unlock each migrated repository and delete them when the migration
-// is complete and you no longer need the source data.
-//
-// GitHub API docs: https://developer.github.com/v3/migration/migrations/#unlock-a-repository
-func (s *MigrationService) UnlockRepo(org string, id int, repo string) (*Response, error) {
- u := fmt.Sprintf("orgs/%v/migrations/%v/repos/%v/lock", org, id, repo)
-
- req, err := s.client.NewRequest("DELETE", u, nil)
- if err != nil {
- return nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeMigrationsPreview)
-
- return s.client.Do(req, nil)
-}
diff --git a/vendor/src/github.com/google/go-github/github/migrations_source_import.go b/vendor/src/github.com/google/go-github/github/migrations_source_import.go
deleted file mode 100644
index 6ed4acf..0000000
--- a/vendor/src/github.com/google/go-github/github/migrations_source_import.go
+++ /dev/null
@@ -1,326 +0,0 @@
-// Copyright 2016 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import "fmt"
-
-// Import represents a repository import request.
-type Import struct {
- // The URL of the originating repository.
- VCSURL *string `json:"vcs_url,omitempty"`
- // The originating VCS type. Can be one of 'subversion', 'git',
- // 'mercurial', or 'tfvc'. Without this parameter, the import job will
- // take additional time to detect the VCS type before beginning the
- // import. This detection step will be reflected in the response.
- VCS *string `json:"vcs,omitempty"`
- // VCSUsername and VCSPassword are only used for StartImport calls that
- // are importing a password-protected repository.
- VCSUsername *string `json:"vcs_username,omitempty"`
- VCSPassword *string `json:"vcs_password,omitempty"`
- // For a tfvc import, the name of the project that is being imported.
- TFVCProject *string `json:"tfvc_project,omitempty"`
-
- // LFS related fields that may be preset in the Import Progress response
-
- // Describes whether the import has been opted in or out of using Git
- // LFS. The value can be 'opt_in', 'opt_out', or 'undecided' if no
- // action has been taken.
- UseLFS *string `json:"use_lfs,omitempty"`
- // Describes whether files larger than 100MB were found during the
- // importing step.
- HasLargeFiles *bool `json:"has_large_files,omitempty"`
- // The total size in gigabytes of files larger than 100MB found in the
- // originating repository.
- LargeFilesSize *int `json:"large_files_size,omitempty"`
- // The total number of files larger than 100MB found in the originating
- // repository. To see a list of these files, call LargeFiles.
- LargeFilesCount *int `json:"large_files_count,omitempty"`
-
- // Identifies the current status of an import. An import that does not
- // have errors will progress through these steps:
- //
- // detecting - the "detection" step of the import is in progress
- // because the request did not include a VCS parameter. The
- // import is identifying the type of source control present at
- // the URL.
- // importing - the "raw" step of the import is in progress. This is
- // where commit data is fetched from the original repository.
- // The import progress response will include CommitCount (the
- // total number of raw commits that will be imported) and
- // Percent (0 - 100, the current progress through the import).
- // mapping - the "rewrite" step of the import is in progress. This
- // is where SVN branches are converted to Git branches, and
- // where author updates are applied. The import progress
- // response does not include progress information.
- // pushing - the "push" step of the import is in progress. This is
- // where the importer updates the repository on GitHub. The
- // import progress response will include PushPercent, which is
- // the percent value reported by git push when it is "Writing
- // objects".
- // complete - the import is complete, and the repository is ready
- // on GitHub.
- //
- // If there are problems, you will see one of these in the status field:
- //
- // auth_failed - the import requires authentication in order to
- // connect to the original repository. Make an UpdateImport
- // request, and include VCSUsername and VCSPassword.
- // error - the import encountered an error. The import progress
- // response will include the FailedStep and an error message.
- // Contact GitHub support for more information.
- // detection_needs_auth - the importer requires authentication for
- // the originating repository to continue detection. Make an
- // UpdatImport request, and include VCSUsername and
- // VCSPassword.
- // detection_found_nothing - the importer didn't recognize any
- // source control at the URL.
- // detection_found_multiple - the importer found several projects
- // or repositories at the provided URL. When this is the case,
- // the Import Progress response will also include a
- // ProjectChoices field with the possible project choices as
- // values. Make an UpdateImport request, and include VCS and
- // (if applicable) TFVCProject.
- Status *string `json:"status,omitempty"`
- CommitCount *int `json:"commit_count,omitempty"`
- StatusText *string `json:"status_text,omitempty"`
- AuthorsCount *int `json:"authors_count,omitempty"`
- Percent *int `json:"percent,omitempty"`
- PushPercent *int `json:"push_percent,omitempty"`
- URL *string `json:"url,omitempty"`
- HTMLURL *string `json:"html_url,omitempty"`
- AuthorsURL *string `json:"authors_url,omitempty"`
- RepositoryURL *string `json:"repository_url,omitempty"`
- Message *string `json:"message,omitempty"`
- FailedStep *string `json:"failed_step,omitempty"`
-
- // Human readable display name, provided when the Import appears as
- // part of ProjectChoices.
- HumanName *string `json:"human_name,omitempty"`
-
- // When the importer finds several projects or repositories at the
- // provided URLs, this will identify the available choices. Call
- // UpdateImport with the selected Import value.
- ProjectChoices []Import `json:"project_choices,omitempty"`
-}
-
-func (i Import) String() string {
- return Stringify(i)
-}
-
-// SourceImportAuthor identifies an author imported from a source repository.
-//
-// GitHub API docs: https://developer.github.com/v3/migration/source_imports/#get-commit-authors
-type SourceImportAuthor struct {
- ID *int `json:"id,omitempty"`
- RemoteID *string `json:"remote_id,omitempty"`
- RemoteName *string `json:"remote_name,omitempty"`
- Email *string `json:"email,omitempty"`
- Name *string `json:"name,omitempty"`
- URL *string `json:"url,omitempty"`
- ImportURL *string `json:"import_url,omitempty"`
-}
-
-func (a SourceImportAuthor) String() string {
- return Stringify(a)
-}
-
-// LargeFile identifies a file larger than 100MB found during a repository import.
-//
-// GitHub API docs: https://developer.github.com/v3/migration/source_imports/#get-large-files
-type LargeFile struct {
- RefName *string `json:"ref_name,omitempty"`
- Path *string `json:"path,omitempty"`
- OID *string `json:"oid,omitempty"`
- Size *int `json:"size,omitempty"`
-}
-
-func (f LargeFile) String() string {
- return Stringify(f)
-}
-
-// StartImport initiates a repository import.
-//
-// GitHub API docs: https://developer.github.com/v3/migration/source_imports/#start-an-import
-func (s *MigrationService) StartImport(owner, repo string, in *Import) (*Import, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/import", owner, repo)
- req, err := s.client.NewRequest("PUT", u, in)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches
- req.Header.Set("Accept", mediaTypeImportPreview)
-
- out := new(Import)
- resp, err := s.client.Do(req, out)
- if err != nil {
- return nil, resp, err
- }
-
- return out, resp, err
-}
-
-// QueryImport queries for the status and progress of an ongoing repository import.
-//
-// GitHub API docs: https://developer.github.com/v3/migration/source_imports/#get-import-progress
-func (s *MigrationService) ImportProgress(owner, repo string) (*Import, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/import", owner, repo)
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches
- req.Header.Set("Accept", mediaTypeImportPreview)
-
- out := new(Import)
- resp, err := s.client.Do(req, out)
- if err != nil {
- return nil, resp, err
- }
-
- return out, resp, err
-}
-
-// UpdateImport initiates a repository import.
-//
-// GitHub API docs: https://developer.github.com/v3/migration/source_imports/#update-existing-import
-func (s *MigrationService) UpdateImport(owner, repo string, in *Import) (*Import, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/import", owner, repo)
- req, err := s.client.NewRequest("PATCH", u, in)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches
- req.Header.Set("Accept", mediaTypeImportPreview)
-
- out := new(Import)
- resp, err := s.client.Do(req, out)
- if err != nil {
- return nil, resp, err
- }
-
- return out, resp, err
-}
-
-// CommitAuthors gets the authors mapped from the original repository.
-//
-// Each type of source control system represents authors in a different way.
-// For example, a Git commit author has a display name and an email address,
-// but a Subversion commit author just has a username. The GitHub Importer will
-// make the author information valid, but the author might not be correct. For
-// example, it will change the bare Subversion username "hubot" into something
-// like "hubot ".
-//
-// This method and MapCommitAuthor allow you to provide correct Git author
-// information.
-//
-// GitHub API docs: https://developer.github.com/v3/migration/source_imports/#get-commit-authors
-func (s *MigrationService) CommitAuthors(owner, repo string) ([]*SourceImportAuthor, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/import/authors", owner, repo)
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches
- req.Header.Set("Accept", mediaTypeImportPreview)
-
- authors := new([]*SourceImportAuthor)
- resp, err := s.client.Do(req, authors)
- if err != nil {
- return nil, resp, err
- }
-
- return *authors, resp, err
-}
-
-// MapCommitAuthor updates an author's identity for the import. Your
-// application can continue updating authors any time before you push new
-// commits to the repository.
-//
-// GitHub API docs: https://developer.github.com/v3/migration/source_imports/#map-a-commit-author
-func (s *MigrationService) MapCommitAuthor(owner, repo string, id int, author *SourceImportAuthor) (*SourceImportAuthor, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/import/authors/%v", owner, repo, id)
- req, err := s.client.NewRequest("PATCH", u, author)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches
- req.Header.Set("Accept", mediaTypeImportPreview)
-
- out := new(SourceImportAuthor)
- resp, err := s.client.Do(req, out)
- if err != nil {
- return nil, resp, err
- }
-
- return out, resp, err
-}
-
-// SetLFSPreference sets whether imported repositories should use Git LFS for
-// files larger than 100MB. Only the UseLFS field on the provided Import is
-// used.
-//
-// GitHub API docs: https://developer.github.com/v3/migration/source_imports/#set-git-lfs-preference
-func (s *MigrationService) SetLFSPreference(owner, repo string, in *Import) (*Import, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/import/lfs", owner, repo)
- req, err := s.client.NewRequest("PATCH", u, in)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches
- req.Header.Set("Accept", mediaTypeImportPreview)
-
- out := new(Import)
- resp, err := s.client.Do(req, out)
- if err != nil {
- return nil, resp, err
- }
-
- return out, resp, err
-}
-
-// LargeFiles lists files larger than 100MB found during the import.
-//
-// GitHub API docs: https://developer.github.com/v3/migration/source_imports/#get-large-files
-func (s *MigrationService) LargeFiles(owner, repo string) ([]*LargeFile, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/import/large_files", owner, repo)
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches
- req.Header.Set("Accept", mediaTypeImportPreview)
-
- files := new([]*LargeFile)
- resp, err := s.client.Do(req, files)
- if err != nil {
- return nil, resp, err
- }
-
- return *files, resp, err
-}
-
-// CancelImport stops an import for a repository.
-//
-// GitHub API docs: https://developer.github.com/v3/migration/source_imports/#cancel-an-import
-func (s *MigrationService) CancelImport(owner, repo string) (*Response, error) {
- u := fmt.Sprintf("repos/%v/%v/import", owner, repo)
- req, err := s.client.NewRequest("DELETE", u, nil)
- if err != nil {
- return nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches
- req.Header.Set("Accept", mediaTypeImportPreview)
-
- return s.client.Do(req, nil)
-}
diff --git a/vendor/src/github.com/google/go-github/github/migrations_source_import_test.go b/vendor/src/github.com/google/go-github/github/migrations_source_import_test.go
deleted file mode 100644
index 4995b59..0000000
--- a/vendor/src/github.com/google/go-github/github/migrations_source_import_test.go
+++ /dev/null
@@ -1,225 +0,0 @@
-// Copyright 2016 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "encoding/json"
- "fmt"
- "net/http"
- "reflect"
- "testing"
-)
-
-func TestMigrationService_StartImport(t *testing.T) {
- setup()
- defer teardown()
-
- input := &Import{
- VCS: String("git"),
- VCSURL: String("url"),
- VCSUsername: String("u"),
- VCSPassword: String("p"),
- }
-
- mux.HandleFunc("/repos/o/r/import", func(w http.ResponseWriter, r *http.Request) {
- v := new(Import)
- json.NewDecoder(r.Body).Decode(v)
-
- testMethod(t, r, "PUT")
- testHeader(t, r, "Accept", mediaTypeImportPreview)
- if !reflect.DeepEqual(v, input) {
- t.Errorf("Request body = %+v, want %+v", v, input)
- }
-
- w.WriteHeader(http.StatusCreated)
- fmt.Fprint(w, `{"status":"importing"}`)
- })
-
- got, _, err := client.Migrations.StartImport("o", "r", input)
- if err != nil {
- t.Errorf("StartImport returned error: %v", err)
- }
- want := &Import{Status: String("importing")}
- if !reflect.DeepEqual(got, want) {
- t.Errorf("StartImport = %+v, want %+v", got, want)
- }
-}
-
-func TestMigrationService_ImportProgress(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/import", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testHeader(t, r, "Accept", mediaTypeImportPreview)
- fmt.Fprint(w, `{"status":"complete"}`)
- })
-
- got, _, err := client.Migrations.ImportProgress("o", "r")
- if err != nil {
- t.Errorf("ImportProgress returned error: %v", err)
- }
- want := &Import{Status: String("complete")}
- if !reflect.DeepEqual(got, want) {
- t.Errorf("ImportProgress = %+v, want %+v", got, want)
- }
-}
-
-func TestMigrationService_UpdateImport(t *testing.T) {
- setup()
- defer teardown()
-
- input := &Import{
- VCS: String("git"),
- VCSURL: String("url"),
- VCSUsername: String("u"),
- VCSPassword: String("p"),
- }
-
- mux.HandleFunc("/repos/o/r/import", func(w http.ResponseWriter, r *http.Request) {
- v := new(Import)
- json.NewDecoder(r.Body).Decode(v)
-
- testMethod(t, r, "PATCH")
- testHeader(t, r, "Accept", mediaTypeImportPreview)
- if !reflect.DeepEqual(v, input) {
- t.Errorf("Request body = %+v, want %+v", v, input)
- }
-
- w.WriteHeader(http.StatusCreated)
- fmt.Fprint(w, `{"status":"importing"}`)
- })
-
- got, _, err := client.Migrations.UpdateImport("o", "r", input)
- if err != nil {
- t.Errorf("UpdateImport returned error: %v", err)
- }
- want := &Import{Status: String("importing")}
- if !reflect.DeepEqual(got, want) {
- t.Errorf("UpdateImport = %+v, want %+v", got, want)
- }
-}
-
-func TestMigrationService_CommitAuthors(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/import/authors", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testHeader(t, r, "Accept", mediaTypeImportPreview)
- fmt.Fprint(w, `[{"id":1,"name":"a"},{"id":2,"name":"b"}]`)
- })
-
- got, _, err := client.Migrations.CommitAuthors("o", "r")
- if err != nil {
- t.Errorf("CommitAuthors returned error: %v", err)
- }
- want := []*SourceImportAuthor{
- {ID: Int(1), Name: String("a")},
- {ID: Int(2), Name: String("b")},
- }
- if !reflect.DeepEqual(got, want) {
- t.Errorf("CommitAuthors = %+v, want %+v", got, want)
- }
-}
-
-func TestMigrationService_MapCommitAuthor(t *testing.T) {
- setup()
- defer teardown()
-
- input := &SourceImportAuthor{Name: String("n"), Email: String("e")}
-
- mux.HandleFunc("/repos/o/r/import/authors/1", func(w http.ResponseWriter, r *http.Request) {
- v := new(SourceImportAuthor)
- json.NewDecoder(r.Body).Decode(v)
-
- testMethod(t, r, "PATCH")
- testHeader(t, r, "Accept", mediaTypeImportPreview)
- if !reflect.DeepEqual(v, input) {
- t.Errorf("Request body = %+v, want %+v", v, input)
- }
-
- fmt.Fprint(w, `{"id": 1}`)
- })
-
- got, _, err := client.Migrations.MapCommitAuthor("o", "r", 1, input)
- if err != nil {
- t.Errorf("MapCommitAuthor returned error: %v", err)
- }
- want := &SourceImportAuthor{ID: Int(1)}
- if !reflect.DeepEqual(got, want) {
- t.Errorf("MapCommitAuthor = %+v, want %+v", got, want)
- }
-}
-
-func TestMigrationService_SetLFSPreference(t *testing.T) {
- setup()
- defer teardown()
-
- input := &Import{UseLFS: String("opt_in")}
-
- mux.HandleFunc("/repos/o/r/import/lfs", func(w http.ResponseWriter, r *http.Request) {
- v := new(Import)
- json.NewDecoder(r.Body).Decode(v)
-
- testMethod(t, r, "PATCH")
- testHeader(t, r, "Accept", mediaTypeImportPreview)
- if !reflect.DeepEqual(v, input) {
- t.Errorf("Request body = %+v, want %+v", v, input)
- }
-
- w.WriteHeader(http.StatusCreated)
- fmt.Fprint(w, `{"status":"importing"}`)
- })
-
- got, _, err := client.Migrations.SetLFSPreference("o", "r", input)
- if err != nil {
- t.Errorf("SetLFSPreference returned error: %v", err)
- }
- want := &Import{Status: String("importing")}
- if !reflect.DeepEqual(got, want) {
- t.Errorf("SetLFSPreference = %+v, want %+v", got, want)
- }
-}
-
-func TestMigrationService_LargeFiles(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/import/large_files", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testHeader(t, r, "Accept", mediaTypeImportPreview)
- fmt.Fprint(w, `[{"oid":"a"},{"oid":"b"}]`)
- })
-
- got, _, err := client.Migrations.LargeFiles("o", "r")
- if err != nil {
- t.Errorf("LargeFiles returned error: %v", err)
- }
- want := []*LargeFile{
- {OID: String("a")},
- {OID: String("b")},
- }
- if !reflect.DeepEqual(got, want) {
- t.Errorf("LargeFiles = %+v, want %+v", got, want)
- }
-}
-
-func TestMigrationService_CancelImport(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/import", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "DELETE")
- testHeader(t, r, "Accept", mediaTypeImportPreview)
- w.WriteHeader(http.StatusNoContent)
- })
-
- _, err := client.Migrations.CancelImport("o", "r")
- if err != nil {
- t.Errorf("CancelImport returned error: %v", err)
- }
-}
diff --git a/vendor/src/github.com/google/go-github/github/migrations_test.go b/vendor/src/github.com/google/go-github/github/migrations_test.go
deleted file mode 100644
index 9a902e4..0000000
--- a/vendor/src/github.com/google/go-github/github/migrations_test.go
+++ /dev/null
@@ -1,177 +0,0 @@
-// Copyright 2016 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "fmt"
- "net/http"
- "reflect"
- "strings"
- "testing"
-)
-
-func TestMigrationService_StartMigration(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/orgs/o/migrations", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "POST")
- testHeader(t, r, "Accept", mediaTypeMigrationsPreview)
-
- w.WriteHeader(http.StatusCreated)
- w.Write(migrationJSON)
- })
-
- opt := &MigrationOptions{
- LockRepositories: true,
- ExcludeAttachments: false,
- }
- got, _, err := client.Migrations.StartMigration("o", []string{"r"}, opt)
- if err != nil {
- t.Errorf("StartMigration returned error: %v", err)
- }
- if want := wantMigration; !reflect.DeepEqual(got, want) {
- t.Errorf("StartMigration = %+v, want %+v", got, want)
- }
-}
-
-func TestMigrationService_ListMigrations(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/orgs/o/migrations", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testHeader(t, r, "Accept", mediaTypeMigrationsPreview)
-
- w.WriteHeader(http.StatusOK)
- w.Write([]byte(fmt.Sprintf("[%s]", migrationJSON)))
- })
-
- got, _, err := client.Migrations.ListMigrations("o")
- if err != nil {
- t.Errorf("ListMigrations returned error: %v", err)
- }
- if want := []*Migration{wantMigration}; !reflect.DeepEqual(got, want) {
- t.Errorf("ListMigrations = %+v, want %+v", got, want)
- }
-}
-
-func TestMigrationService_MigrationStatus(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/orgs/o/migrations/1", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testHeader(t, r, "Accept", mediaTypeMigrationsPreview)
-
- w.WriteHeader(http.StatusOK)
- w.Write(migrationJSON)
- })
-
- got, _, err := client.Migrations.MigrationStatus("o", 1)
- if err != nil {
- t.Errorf("MigrationStatus returned error: %v", err)
- }
- if want := wantMigration; !reflect.DeepEqual(got, want) {
- t.Errorf("MigrationStatus = %+v, want %+v", got, want)
- }
-}
-
-func TestMigrationService_MigrationArchiveURL(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/orgs/o/migrations/1/archive", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testHeader(t, r, "Accept", mediaTypeMigrationsPreview)
-
- http.Redirect(w, r, "/yo", http.StatusFound)
- })
- mux.HandleFunc("/yo", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
-
- w.WriteHeader(http.StatusOK)
- w.Write([]byte("0123456789abcdef"))
- })
-
- got, err := client.Migrations.MigrationArchiveURL("o", 1)
- if err != nil {
- t.Errorf("MigrationStatus returned error: %v", err)
- }
- if want := "/yo"; !strings.HasSuffix(got, want) {
- t.Errorf("MigrationArchiveURL = %+v, want %+v", got, want)
- }
-}
-
-func TestMigrationService_DeleteMigration(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/orgs/o/migrations/1/archive", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "DELETE")
- testHeader(t, r, "Accept", mediaTypeMigrationsPreview)
-
- w.WriteHeader(http.StatusNoContent)
- })
-
- if _, err := client.Migrations.DeleteMigration("o", 1); err != nil {
- t.Errorf("DeleteMigration returned error: %v", err)
- }
-}
-
-func TestMigrationService_UnlockRepo(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/orgs/o/migrations/1/repos/r/lock", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "DELETE")
- testHeader(t, r, "Accept", mediaTypeMigrationsPreview)
-
- w.WriteHeader(http.StatusNoContent)
- })
-
- if _, err := client.Migrations.UnlockRepo("o", 1, "r"); err != nil {
- t.Errorf("UnlockRepo returned error: %v", err)
- }
-}
-
-var migrationJSON = []byte(`{
- "id": 79,
- "guid": "0b989ba4-242f-11e5-81e1-c7b6966d2516",
- "state": "pending",
- "lock_repositories": true,
- "exclude_attachments": false,
- "url": "https://api.github.com/orgs/octo-org/migrations/79",
- "created_at": "2015-07-06T15:33:38-07:00",
- "updated_at": "2015-07-06T15:33:38-07:00",
- "repositories": [
- {
- "id": 1296269,
- "name": "Hello-World",
- "full_name": "octocat/Hello-World",
- "description": "This your first repo!"
- }
- ]
-}`)
-
-var wantMigration = &Migration{
- ID: Int(79),
- GUID: String("0b989ba4-242f-11e5-81e1-c7b6966d2516"),
- State: String("pending"),
- LockRepositories: Bool(true),
- ExcludeAttachments: Bool(false),
- URL: String("https://api.github.com/orgs/octo-org/migrations/79"),
- CreatedAt: String("2015-07-06T15:33:38-07:00"),
- UpdatedAt: String("2015-07-06T15:33:38-07:00"),
- Repositories: []*Repository{
- {
- ID: Int(1296269),
- Name: String("Hello-World"),
- FullName: String("octocat/Hello-World"),
- Description: String("This your first repo!"),
- },
- },
-}
diff --git a/vendor/src/github.com/google/go-github/github/misc.go b/vendor/src/github.com/google/go-github/github/misc.go
deleted file mode 100644
index 8576a4c..0000000
--- a/vendor/src/github.com/google/go-github/github/misc.go
+++ /dev/null
@@ -1,197 +0,0 @@
-// Copyright 2014 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "bytes"
- "fmt"
- "net/url"
-)
-
-// MarkdownOptions specifies optional parameters to the Markdown method.
-type MarkdownOptions struct {
- // Mode identifies the rendering mode. Possible values are:
- // markdown - render a document as plain Markdown, just like
- // README files are rendered.
- //
- // gfm - to render a document as user-content, e.g. like user
- // comments or issues are rendered. In GFM mode, hard line breaks are
- // always taken into account, and issue and user mentions are linked
- // accordingly.
- //
- // Default is "markdown".
- Mode string
-
- // Context identifies the repository context. Only taken into account
- // when rendering as "gfm".
- Context string
-}
-
-type markdownRequest struct {
- Text *string `json:"text,omitempty"`
- Mode *string `json:"mode,omitempty"`
- Context *string `json:"context,omitempty"`
-}
-
-// Markdown renders an arbitrary Markdown document.
-//
-// GitHub API docs: https://developer.github.com/v3/markdown/
-func (c *Client) Markdown(text string, opt *MarkdownOptions) (string, *Response, error) {
- request := &markdownRequest{Text: String(text)}
- if opt != nil {
- if opt.Mode != "" {
- request.Mode = String(opt.Mode)
- }
- if opt.Context != "" {
- request.Context = String(opt.Context)
- }
- }
-
- req, err := c.NewRequest("POST", "markdown", request)
- if err != nil {
- return "", nil, err
- }
-
- buf := new(bytes.Buffer)
- resp, err := c.Do(req, buf)
- if err != nil {
- return "", resp, err
- }
-
- return buf.String(), resp, nil
-}
-
-// ListEmojis returns the emojis available to use on GitHub.
-//
-// GitHub API docs: https://developer.github.com/v3/emojis/
-func (c *Client) ListEmojis() (map[string]string, *Response, error) {
- req, err := c.NewRequest("GET", "emojis", nil)
- if err != nil {
- return nil, nil, err
- }
-
- var emoji map[string]string
- resp, err := c.Do(req, &emoji)
- if err != nil {
- return nil, resp, err
- }
-
- return emoji, resp, nil
-}
-
-// APIMeta represents metadata about the GitHub API.
-type APIMeta struct {
- // An Array of IP addresses in CIDR format specifying the addresses
- // that incoming service hooks will originate from on GitHub.com.
- Hooks []string `json:"hooks,omitempty"`
-
- // An Array of IP addresses in CIDR format specifying the Git servers
- // for GitHub.com.
- Git []string `json:"git,omitempty"`
-
- // Whether authentication with username and password is supported.
- // (GitHub Enterprise instances using CAS or OAuth for authentication
- // will return false. Features like Basic Authentication with a
- // username and password, sudo mode, and two-factor authentication are
- // not supported on these servers.)
- VerifiablePasswordAuthentication *bool `json:"verifiable_password_authentication,omitempty"`
-
- // An array of IP addresses in CIDR format specifying the addresses
- // which serve GitHub Pages websites.
- Pages []string `json:"pages,omitempty"`
-}
-
-// APIMeta returns information about GitHub.com, the service. Or, if you access
-// this endpoint on your organization’s GitHub Enterprise installation, this
-// endpoint provides information about that installation.
-//
-// GitHub API docs: https://developer.github.com/v3/meta/
-func (c *Client) APIMeta() (*APIMeta, *Response, error) {
- req, err := c.NewRequest("GET", "meta", nil)
- if err != nil {
- return nil, nil, err
- }
-
- meta := new(APIMeta)
- resp, err := c.Do(req, meta)
- if err != nil {
- return nil, resp, err
- }
-
- return meta, resp, nil
-}
-
-// Octocat returns an ASCII art octocat with the specified message in a speech
-// bubble. If message is empty, a random zen phrase is used.
-func (c *Client) Octocat(message string) (string, *Response, error) {
- u := "octocat"
- if message != "" {
- u = fmt.Sprintf("%s?s=%s", u, url.QueryEscape(message))
- }
-
- req, err := c.NewRequest("GET", u, nil)
- if err != nil {
- return "", nil, err
- }
-
- buf := new(bytes.Buffer)
- resp, err := c.Do(req, buf)
- if err != nil {
- return "", resp, err
- }
-
- return buf.String(), resp, nil
-}
-
-// Zen returns a random line from The Zen of GitHub.
-//
-// see also: http://warpspire.com/posts/taste/
-func (c *Client) Zen() (string, *Response, error) {
- req, err := c.NewRequest("GET", "zen", nil)
- if err != nil {
- return "", nil, err
- }
-
- buf := new(bytes.Buffer)
- resp, err := c.Do(req, buf)
- if err != nil {
- return "", resp, err
- }
-
- return buf.String(), resp, nil
-}
-
-// ServiceHook represents a hook that has configuration settings, a list of
-// available events, and default events.
-type ServiceHook struct {
- Name *string `json:"name,omitempty"`
- Events []string `json:"events,omitempty"`
- SupportedEvents []string `json:"supported_events,omitempty"`
- Schema [][]string `json:"schema,omitempty"`
-}
-
-func (s *ServiceHook) String() string {
- return Stringify(s)
-}
-
-// ListServiceHooks lists all of the available service hooks.
-//
-// GitHub API docs: https://developer.github.com/webhooks/#services
-func (c *Client) ListServiceHooks() ([]*ServiceHook, *Response, error) {
- u := "hooks"
- req, err := c.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- hooks := new([]*ServiceHook)
- resp, err := c.Do(req, hooks)
- if err != nil {
- return nil, resp, err
- }
-
- return *hooks, resp, err
-}
diff --git a/vendor/src/github.com/google/go-github/github/misc_test.go b/vendor/src/github.com/google/go-github/github/misc_test.go
deleted file mode 100644
index afced70..0000000
--- a/vendor/src/github.com/google/go-github/github/misc_test.go
+++ /dev/null
@@ -1,170 +0,0 @@
-// Copyright 2014 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "encoding/json"
- "fmt"
- "net/http"
- "reflect"
- "testing"
-)
-
-func TestMarkdown(t *testing.T) {
- setup()
- defer teardown()
-
- input := &markdownRequest{
- Text: String("# text #"),
- Mode: String("gfm"),
- Context: String("google/go-github"),
- }
- mux.HandleFunc("/markdown", func(w http.ResponseWriter, r *http.Request) {
- v := new(markdownRequest)
- json.NewDecoder(r.Body).Decode(v)
-
- testMethod(t, r, "POST")
- if !reflect.DeepEqual(v, input) {
- t.Errorf("Request body = %+v, want %+v", v, input)
- }
- fmt.Fprint(w, `text `)
- })
-
- md, _, err := client.Markdown("# text #", &MarkdownOptions{
- Mode: "gfm",
- Context: "google/go-github",
- })
- if err != nil {
- t.Errorf("Markdown returned error: %v", err)
- }
-
- if want := "text "; want != md {
- t.Errorf("Markdown returned %+v, want %+v", md, want)
- }
-}
-
-func TestListEmojis(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/emojis", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- fmt.Fprint(w, `{"+1": "+1.png"}`)
- })
-
- emoji, _, err := client.ListEmojis()
- if err != nil {
- t.Errorf("ListEmojis returned error: %v", err)
- }
-
- want := map[string]string{"+1": "+1.png"}
- if !reflect.DeepEqual(want, emoji) {
- t.Errorf("ListEmojis returned %+v, want %+v", emoji, want)
- }
-}
-
-func TestAPIMeta(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/meta", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- fmt.Fprint(w, `{"hooks":["h"], "git":["g"], "pages":["p"], "verifiable_password_authentication": true}`)
- })
-
- meta, _, err := client.APIMeta()
- if err != nil {
- t.Errorf("APIMeta returned error: %v", err)
- }
-
- want := &APIMeta{
- Hooks: []string{"h"},
- Git: []string{"g"},
- Pages: []string{"p"},
- VerifiablePasswordAuthentication: Bool(true),
- }
- if !reflect.DeepEqual(want, meta) {
- t.Errorf("APIMeta returned %+v, want %+v", meta, want)
- }
-}
-
-func TestOctocat(t *testing.T) {
- setup()
- defer teardown()
-
- input := "input"
- output := "sample text"
-
- mux.HandleFunc("/octocat", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testFormValues(t, r, values{"s": input})
- w.Header().Set("Content-Type", "application/octocat-stream")
- fmt.Fprint(w, output)
- })
-
- got, _, err := client.Octocat(input)
- if err != nil {
- t.Errorf("Octocat returned error: %v", err)
- }
-
- if want := output; got != want {
- t.Errorf("Octocat returned %+v, want %+v", got, want)
- }
-}
-
-func TestZen(t *testing.T) {
- setup()
- defer teardown()
-
- output := "sample text"
-
- mux.HandleFunc("/zen", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- w.Header().Set("Content-Type", "text/plain;charset=utf-8")
- fmt.Fprint(w, output)
- })
-
- got, _, err := client.Zen()
- if err != nil {
- t.Errorf("Zen returned error: %v", err)
- }
-
- if want := output; got != want {
- t.Errorf("Zen returned %+v, want %+v", got, want)
- }
-}
-
-func TestRepositoriesService_ListServiceHooks(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/hooks", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- fmt.Fprint(w, `[{
- "name":"n",
- "events":["e"],
- "supported_events":["s"],
- "schema":[
- ["a", "b"]
- ]
- }]`)
- })
-
- hooks, _, err := client.Repositories.ListServiceHooks()
- if err != nil {
- t.Errorf("Repositories.ListHooks returned error: %v", err)
- }
-
- want := []*ServiceHook{{
- Name: String("n"),
- Events: []string{"e"},
- SupportedEvents: []string{"s"},
- Schema: [][]string{{"a", "b"}},
- }}
- if !reflect.DeepEqual(hooks, want) {
- t.Errorf("Repositories.ListServiceHooks returned %+v, want %+v", hooks, want)
- }
-}
diff --git a/vendor/src/github.com/google/go-github/github/orgs.go b/vendor/src/github.com/google/go-github/github/orgs.go
deleted file mode 100644
index e71055c..0000000
--- a/vendor/src/github.com/google/go-github/github/orgs.go
+++ /dev/null
@@ -1,170 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "fmt"
- "time"
-)
-
-// OrganizationsService provides access to the organization related functions
-// in the GitHub API.
-//
-// GitHub API docs: http://developer.github.com/v3/orgs/
-type OrganizationsService service
-
-// Organization represents a GitHub organization account.
-type Organization struct {
- Login *string `json:"login,omitempty"`
- ID *int `json:"id,omitempty"`
- AvatarURL *string `json:"avatar_url,omitempty"`
- HTMLURL *string `json:"html_url,omitempty"`
- Name *string `json:"name,omitempty"`
- Company *string `json:"company,omitempty"`
- Blog *string `json:"blog,omitempty"`
- Location *string `json:"location,omitempty"`
- Email *string `json:"email,omitempty"`
- PublicRepos *int `json:"public_repos,omitempty"`
- PublicGists *int `json:"public_gists,omitempty"`
- Followers *int `json:"followers,omitempty"`
- Following *int `json:"following,omitempty"`
- CreatedAt *time.Time `json:"created_at,omitempty"`
- UpdatedAt *time.Time `json:"updated_at,omitempty"`
- TotalPrivateRepos *int `json:"total_private_repos,omitempty"`
- OwnedPrivateRepos *int `json:"owned_private_repos,omitempty"`
- PrivateGists *int `json:"private_gists,omitempty"`
- DiskUsage *int `json:"disk_usage,omitempty"`
- Collaborators *int `json:"collaborators,omitempty"`
- BillingEmail *string `json:"billing_email,omitempty"`
- Type *string `json:"type,omitempty"`
- Plan *Plan `json:"plan,omitempty"`
-
- // API URLs
- URL *string `json:"url,omitempty"`
- EventsURL *string `json:"events_url,omitempty"`
- MembersURL *string `json:"members_url,omitempty"`
- PublicMembersURL *string `json:"public_members_url,omitempty"`
- ReposURL *string `json:"repos_url,omitempty"`
-}
-
-func (o Organization) String() string {
- return Stringify(o)
-}
-
-// Plan represents the payment plan for an account. See plans at https://github.com/plans.
-type Plan struct {
- Name *string `json:"name,omitempty"`
- Space *int `json:"space,omitempty"`
- Collaborators *int `json:"collaborators,omitempty"`
- PrivateRepos *int `json:"private_repos,omitempty"`
-}
-
-func (p Plan) String() string {
- return Stringify(p)
-}
-
-// OrganizationsListOptions specifies the optional parameters to the
-// OrganizationsService.ListAll method.
-type OrganizationsListOptions struct {
- // Since filters Organizations by ID.
- Since int `url:"since,omitempty"`
-
- ListOptions
-}
-
-// ListAll lists all organizations, in the order that they were created on GitHub.
-//
-// Note: Pagination is powered exclusively by the since parameter. To continue
-// listing the next set of organizations, use the ID of the last-returned organization
-// as the opts.Since parameter for the next call.
-//
-// GitHub API docs: https://developer.github.com/v3/orgs/#list-all-organizations
-func (s *OrganizationsService) ListAll(opt *OrganizationsListOptions) ([]*Organization, *Response, error) {
- u, err := addOptions("organizations", opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- orgs := []*Organization{}
- resp, err := s.client.Do(req, &orgs)
- if err != nil {
- return nil, resp, err
- }
- return orgs, resp, err
-}
-
-// List the organizations for a user. Passing the empty string will list
-// organizations for the authenticated user.
-//
-// GitHub API docs: http://developer.github.com/v3/orgs/#list-user-organizations
-func (s *OrganizationsService) List(user string, opt *ListOptions) ([]*Organization, *Response, error) {
- var u string
- if user != "" {
- u = fmt.Sprintf("users/%v/orgs", user)
- } else {
- u = "user/orgs"
- }
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- orgs := new([]*Organization)
- resp, err := s.client.Do(req, orgs)
- if err != nil {
- return nil, resp, err
- }
-
- return *orgs, resp, err
-}
-
-// Get fetches an organization by name.
-//
-// GitHub API docs: http://developer.github.com/v3/orgs/#get-an-organization
-func (s *OrganizationsService) Get(org string) (*Organization, *Response, error) {
- u := fmt.Sprintf("orgs/%v", org)
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- organization := new(Organization)
- resp, err := s.client.Do(req, organization)
- if err != nil {
- return nil, resp, err
- }
-
- return organization, resp, err
-}
-
-// Edit an organization.
-//
-// GitHub API docs: http://developer.github.com/v3/orgs/#edit-an-organization
-func (s *OrganizationsService) Edit(name string, org *Organization) (*Organization, *Response, error) {
- u := fmt.Sprintf("orgs/%v", name)
- req, err := s.client.NewRequest("PATCH", u, org)
- if err != nil {
- return nil, nil, err
- }
-
- o := new(Organization)
- resp, err := s.client.Do(req, o)
- if err != nil {
- return nil, resp, err
- }
-
- return o, resp, err
-}
diff --git a/vendor/src/github.com/google/go-github/github/orgs_hooks.go b/vendor/src/github.com/google/go-github/github/orgs_hooks.go
deleted file mode 100644
index 95b8322..0000000
--- a/vendor/src/github.com/google/go-github/github/orgs_hooks.go
+++ /dev/null
@@ -1,104 +0,0 @@
-// Copyright 2015 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import "fmt"
-
-// ListHooks lists all Hooks for the specified organization.
-//
-// GitHub API docs: https://developer.github.com/v3/orgs/hooks/#list-hooks
-func (s *OrganizationsService) ListHooks(org string, opt *ListOptions) ([]*Hook, *Response, error) {
- u := fmt.Sprintf("orgs/%v/hooks", org)
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- hooks := new([]*Hook)
- resp, err := s.client.Do(req, hooks)
- if err != nil {
- return nil, resp, err
- }
-
- return *hooks, resp, err
-}
-
-// GetHook returns a single specified Hook.
-//
-// GitHub API docs: https://developer.github.com/v3/orgs/hooks/#get-single-hook
-func (s *OrganizationsService) GetHook(org string, id int) (*Hook, *Response, error) {
- u := fmt.Sprintf("orgs/%v/hooks/%d", org, id)
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
- hook := new(Hook)
- resp, err := s.client.Do(req, hook)
- return hook, resp, err
-}
-
-// CreateHook creates a Hook for the specified org.
-// Name and Config are required fields.
-//
-// GitHub API docs: https://developer.github.com/v3/orgs/hooks/#create-a-hook
-func (s *OrganizationsService) CreateHook(org string, hook *Hook) (*Hook, *Response, error) {
- u := fmt.Sprintf("orgs/%v/hooks", org)
- req, err := s.client.NewRequest("POST", u, hook)
- if err != nil {
- return nil, nil, err
- }
-
- h := new(Hook)
- resp, err := s.client.Do(req, h)
- if err != nil {
- return nil, resp, err
- }
-
- return h, resp, err
-}
-
-// EditHook updates a specified Hook.
-//
-// GitHub API docs: https://developer.github.com/v3/orgs/hooks/#edit-a-hook
-func (s *OrganizationsService) EditHook(org string, id int, hook *Hook) (*Hook, *Response, error) {
- u := fmt.Sprintf("orgs/%v/hooks/%d", org, id)
- req, err := s.client.NewRequest("PATCH", u, hook)
- if err != nil {
- return nil, nil, err
- }
- h := new(Hook)
- resp, err := s.client.Do(req, h)
- return h, resp, err
-}
-
-// PingHook triggers a 'ping' event to be sent to the Hook.
-//
-// GitHub API docs: https://developer.github.com/v3/orgs/hooks/#ping-a-hook
-func (s *OrganizationsService) PingHook(org string, id int) (*Response, error) {
- u := fmt.Sprintf("orgs/%v/hooks/%d/pings", org, id)
- req, err := s.client.NewRequest("POST", u, nil)
- if err != nil {
- return nil, err
- }
- return s.client.Do(req, nil)
-}
-
-// DeleteHook deletes a specified Hook.
-//
-// GitHub API docs: https://developer.github.com/v3/orgs/hooks/#delete-a-hook
-func (s *OrganizationsService) DeleteHook(org string, id int) (*Response, error) {
- u := fmt.Sprintf("orgs/%v/hooks/%d", org, id)
- req, err := s.client.NewRequest("DELETE", u, nil)
- if err != nil {
- return nil, err
- }
- return s.client.Do(req, nil)
-}
diff --git a/vendor/src/github.com/google/go-github/github/orgs_hooks_test.go b/vendor/src/github.com/google/go-github/github/orgs_hooks_test.go
deleted file mode 100644
index b4c3af7..0000000
--- a/vendor/src/github.com/google/go-github/github/orgs_hooks_test.go
+++ /dev/null
@@ -1,134 +0,0 @@
-// Copyright 2015 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "encoding/json"
- "fmt"
- "net/http"
- "reflect"
- "testing"
-)
-
-func TestOrganizationsService_ListHooks(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/orgs/o/hooks", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testFormValues(t, r, values{"page": "2"})
- fmt.Fprint(w, `[{"id":1}, {"id":2}]`)
- })
-
- opt := &ListOptions{Page: 2}
-
- hooks, _, err := client.Organizations.ListHooks("o", opt)
- if err != nil {
- t.Errorf("Organizations.ListHooks returned error: %v", err)
- }
-
- want := []*Hook{{ID: Int(1)}, {ID: Int(2)}}
- if !reflect.DeepEqual(hooks, want) {
- t.Errorf("Organizations.ListHooks returned %+v, want %+v", hooks, want)
- }
-}
-
-func TestOrganizationsService_ListHooks_invalidOrg(t *testing.T) {
- _, _, err := client.Organizations.ListHooks("%", nil)
- testURLParseError(t, err)
-}
-
-func TestOrganizationsService_GetHook(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/orgs/o/hooks/1", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- fmt.Fprint(w, `{"id":1}`)
- })
-
- hook, _, err := client.Organizations.GetHook("o", 1)
- if err != nil {
- t.Errorf("Organizations.GetHook returned error: %v", err)
- }
-
- want := &Hook{ID: Int(1)}
- if !reflect.DeepEqual(hook, want) {
- t.Errorf("Organizations.GetHook returned %+v, want %+v", hook, want)
- }
-}
-
-func TestOrganizationsService_GetHook_invalidOrg(t *testing.T) {
- _, _, err := client.Organizations.GetHook("%", 1)
- testURLParseError(t, err)
-}
-
-func TestOrganizationsService_EditHook(t *testing.T) {
- setup()
- defer teardown()
-
- input := &Hook{Name: String("t")}
-
- mux.HandleFunc("/orgs/o/hooks/1", func(w http.ResponseWriter, r *http.Request) {
- v := new(Hook)
- json.NewDecoder(r.Body).Decode(v)
-
- testMethod(t, r, "PATCH")
- if !reflect.DeepEqual(v, input) {
- t.Errorf("Request body = %+v, want %+v", v, input)
- }
-
- fmt.Fprint(w, `{"id":1}`)
- })
-
- hook, _, err := client.Organizations.EditHook("o", 1, input)
- if err != nil {
- t.Errorf("Organizations.EditHook returned error: %v", err)
- }
-
- want := &Hook{ID: Int(1)}
- if !reflect.DeepEqual(hook, want) {
- t.Errorf("Organizations.EditHook returned %+v, want %+v", hook, want)
- }
-}
-
-func TestOrganizationsService_EditHook_invalidOrg(t *testing.T) {
- _, _, err := client.Organizations.EditHook("%", 1, nil)
- testURLParseError(t, err)
-}
-
-func TestOrganizationsService_PingHook(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/orgs/o/hooks/1/pings", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "POST")
- })
-
- _, err := client.Organizations.PingHook("o", 1)
- if err != nil {
- t.Errorf("Organizations.PingHook returned error: %v", err)
- }
-}
-
-func TestOrganizationsService_DeleteHook(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/orgs/o/hooks/1", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "DELETE")
- })
-
- _, err := client.Organizations.DeleteHook("o", 1)
- if err != nil {
- t.Errorf("Organizations.DeleteHook returned error: %v", err)
- }
-}
-
-func TestOrganizationsService_DeleteHook_invalidOrg(t *testing.T) {
- _, err := client.Organizations.DeleteHook("%", 1)
- testURLParseError(t, err)
-}
diff --git a/vendor/src/github.com/google/go-github/github/orgs_members.go b/vendor/src/github.com/google/go-github/github/orgs_members.go
deleted file mode 100644
index 80454ad..0000000
--- a/vendor/src/github.com/google/go-github/github/orgs_members.go
+++ /dev/null
@@ -1,272 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import "fmt"
-
-// Membership represents the status of a user's membership in an organization or team.
-type Membership struct {
- URL *string `json:"url,omitempty"`
-
- // State is the user's status within the organization or team.
- // Possible values are: "active", "pending"
- State *string `json:"state,omitempty"`
-
- // Role identifies the user's role within the organization or team.
- // Possible values for organization membership:
- // member - non-owner organization member
- // admin - organization owner
- //
- // Possible values for team membership are:
- // member - a normal member of the team
- // maintainer - a team maintainer. Able to add/remove other team
- // members, promote other team members to team
- // maintainer, and edit the team’s name and description
- Role *string `json:"role,omitempty"`
-
- // For organization membership, the API URL of the organization.
- OrganizationURL *string `json:"organization_url,omitempty"`
-
- // For organization membership, the organization the membership is for.
- Organization *Organization `json:"organization,omitempty"`
-
- // For organization membership, the user the membership is for.
- User *User `json:"user,omitempty"`
-}
-
-func (m Membership) String() string {
- return Stringify(m)
-}
-
-// ListMembersOptions specifies optional parameters to the
-// OrganizationsService.ListMembers method.
-type ListMembersOptions struct {
- // If true (or if the authenticated user is not an owner of the
- // organization), list only publicly visible members.
- PublicOnly bool `url:"-"`
-
- // Filter members returned in the list. Possible values are:
- // 2fa_disabled, all. Default is "all".
- Filter string `url:"filter,omitempty"`
-
- // Role filters members returned by their role in the organization.
- // Possible values are:
- // all - all members of the organization, regardless of role
- // admin - organization owners
- // member - non-organization members
- //
- // Default is "all".
- Role string `url:"role,omitempty"`
-
- ListOptions
-}
-
-// ListMembers lists the members for an organization. If the authenticated
-// user is an owner of the organization, this will return both concealed and
-// public members, otherwise it will only return public members.
-//
-// GitHub API docs: http://developer.github.com/v3/orgs/members/#members-list
-func (s *OrganizationsService) ListMembers(org string, opt *ListMembersOptions) ([]*User, *Response, error) {
- var u string
- if opt != nil && opt.PublicOnly {
- u = fmt.Sprintf("orgs/%v/public_members", org)
- } else {
- u = fmt.Sprintf("orgs/%v/members", org)
- }
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- members := new([]*User)
- resp, err := s.client.Do(req, members)
- if err != nil {
- return nil, resp, err
- }
-
- return *members, resp, err
-}
-
-// IsMember checks if a user is a member of an organization.
-//
-// GitHub API docs: http://developer.github.com/v3/orgs/members/#check-membership
-func (s *OrganizationsService) IsMember(org, user string) (bool, *Response, error) {
- u := fmt.Sprintf("orgs/%v/members/%v", org, user)
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return false, nil, err
- }
-
- resp, err := s.client.Do(req, nil)
- member, err := parseBoolResponse(err)
- return member, resp, err
-}
-
-// IsPublicMember checks if a user is a public member of an organization.
-//
-// GitHub API docs: http://developer.github.com/v3/orgs/members/#check-public-membership
-func (s *OrganizationsService) IsPublicMember(org, user string) (bool, *Response, error) {
- u := fmt.Sprintf("orgs/%v/public_members/%v", org, user)
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return false, nil, err
- }
-
- resp, err := s.client.Do(req, nil)
- member, err := parseBoolResponse(err)
- return member, resp, err
-}
-
-// RemoveMember removes a user from all teams of an organization.
-//
-// GitHub API docs: http://developer.github.com/v3/orgs/members/#remove-a-member
-func (s *OrganizationsService) RemoveMember(org, user string) (*Response, error) {
- u := fmt.Sprintf("orgs/%v/members/%v", org, user)
- req, err := s.client.NewRequest("DELETE", u, nil)
- if err != nil {
- return nil, err
- }
-
- return s.client.Do(req, nil)
-}
-
-// PublicizeMembership publicizes a user's membership in an organization. (A
-// user cannot publicize the membership for another user.)
-//
-// GitHub API docs: http://developer.github.com/v3/orgs/members/#publicize-a-users-membership
-func (s *OrganizationsService) PublicizeMembership(org, user string) (*Response, error) {
- u := fmt.Sprintf("orgs/%v/public_members/%v", org, user)
- req, err := s.client.NewRequest("PUT", u, nil)
- if err != nil {
- return nil, err
- }
-
- return s.client.Do(req, nil)
-}
-
-// ConcealMembership conceals a user's membership in an organization.
-//
-// GitHub API docs: http://developer.github.com/v3/orgs/members/#conceal-a-users-membership
-func (s *OrganizationsService) ConcealMembership(org, user string) (*Response, error) {
- u := fmt.Sprintf("orgs/%v/public_members/%v", org, user)
- req, err := s.client.NewRequest("DELETE", u, nil)
- if err != nil {
- return nil, err
- }
-
- return s.client.Do(req, nil)
-}
-
-// ListOrgMembershipsOptions specifies optional parameters to the
-// OrganizationsService.ListOrgMemberships method.
-type ListOrgMembershipsOptions struct {
- // Filter memberships to include only those with the specified state.
- // Possible values are: "active", "pending".
- State string `url:"state,omitempty"`
-
- ListOptions
-}
-
-// ListOrgMemberships lists the organization memberships for the authenticated user.
-//
-// GitHub API docs: https://developer.github.com/v3/orgs/members/#list-your-organization-memberships
-func (s *OrganizationsService) ListOrgMemberships(opt *ListOrgMembershipsOptions) ([]*Membership, *Response, error) {
- u := "user/memberships/orgs"
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- var memberships []*Membership
- resp, err := s.client.Do(req, &memberships)
- if err != nil {
- return nil, resp, err
- }
-
- return memberships, resp, err
-}
-
-// GetOrgMembership gets the membership for a user in a specified organization.
-// Passing an empty string for user will get the membership for the
-// authenticated user.
-//
-// GitHub API docs: https://developer.github.com/v3/orgs/members/#get-organization-membership
-// GitHub API docs: https://developer.github.com/v3/orgs/members/#get-your-organization-membership
-func (s *OrganizationsService) GetOrgMembership(user, org string) (*Membership, *Response, error) {
- var u string
- if user != "" {
- u = fmt.Sprintf("orgs/%v/memberships/%v", org, user)
- } else {
- u = fmt.Sprintf("user/memberships/orgs/%v", org)
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- membership := new(Membership)
- resp, err := s.client.Do(req, membership)
- if err != nil {
- return nil, resp, err
- }
-
- return membership, resp, err
-}
-
-// EditOrgMembership edits the membership for user in specified organization.
-// Passing an empty string for user will edit the membership for the
-// authenticated user.
-//
-// GitHub API docs: https://developer.github.com/v3/orgs/members/#add-or-update-organization-membership
-// GitHub API docs: https://developer.github.com/v3/orgs/members/#edit-your-organization-membership
-func (s *OrganizationsService) EditOrgMembership(user, org string, membership *Membership) (*Membership, *Response, error) {
- var u, method string
- if user != "" {
- u = fmt.Sprintf("orgs/%v/memberships/%v", org, user)
- method = "PUT"
- } else {
- u = fmt.Sprintf("user/memberships/orgs/%v", org)
- method = "PATCH"
- }
-
- req, err := s.client.NewRequest(method, u, membership)
- if err != nil {
- return nil, nil, err
- }
-
- m := new(Membership)
- resp, err := s.client.Do(req, m)
- if err != nil {
- return nil, resp, err
- }
-
- return m, resp, err
-}
-
-// RemoveOrgMembership removes user from the specified organization. If the
-// user has been invited to the organization, this will cancel their invitation.
-//
-// GitHub API docs: https://developer.github.com/v3/orgs/members/#remove-organization-membership
-func (s *OrganizationsService) RemoveOrgMembership(user, org string) (*Response, error) {
- u := fmt.Sprintf("orgs/%v/memberships/%v", org, user)
- req, err := s.client.NewRequest("DELETE", u, nil)
- if err != nil {
- return nil, err
- }
-
- return s.client.Do(req, nil)
-}
diff --git a/vendor/src/github.com/google/go-github/github/orgs_members_test.go b/vendor/src/github.com/google/go-github/github/orgs_members_test.go
deleted file mode 100644
index f95e5be..0000000
--- a/vendor/src/github.com/google/go-github/github/orgs_members_test.go
+++ /dev/null
@@ -1,355 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "encoding/json"
- "fmt"
- "net/http"
- "reflect"
- "testing"
-)
-
-func TestOrganizationsService_ListMembers(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/orgs/o/members", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testFormValues(t, r, values{
- "filter": "2fa_disabled",
- "role": "admin",
- "page": "2",
- })
- fmt.Fprint(w, `[{"id":1}]`)
- })
-
- opt := &ListMembersOptions{
- PublicOnly: false,
- Filter: "2fa_disabled",
- Role: "admin",
- ListOptions: ListOptions{Page: 2},
- }
- members, _, err := client.Organizations.ListMembers("o", opt)
- if err != nil {
- t.Errorf("Organizations.ListMembers returned error: %v", err)
- }
-
- want := []*User{{ID: Int(1)}}
- if !reflect.DeepEqual(members, want) {
- t.Errorf("Organizations.ListMembers returned %+v, want %+v", members, want)
- }
-}
-
-func TestOrganizationsService_ListMembers_invalidOrg(t *testing.T) {
- _, _, err := client.Organizations.ListMembers("%", nil)
- testURLParseError(t, err)
-}
-
-func TestOrganizationsService_ListMembers_public(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/orgs/o/public_members", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- fmt.Fprint(w, `[{"id":1}]`)
- })
-
- opt := &ListMembersOptions{PublicOnly: true}
- members, _, err := client.Organizations.ListMembers("o", opt)
- if err != nil {
- t.Errorf("Organizations.ListMembers returned error: %v", err)
- }
-
- want := []*User{{ID: Int(1)}}
- if !reflect.DeepEqual(members, want) {
- t.Errorf("Organizations.ListMembers returned %+v, want %+v", members, want)
- }
-}
-
-func TestOrganizationsService_IsMember(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/orgs/o/members/u", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- w.WriteHeader(http.StatusNoContent)
- })
-
- member, _, err := client.Organizations.IsMember("o", "u")
- if err != nil {
- t.Errorf("Organizations.IsMember returned error: %v", err)
- }
- if want := true; member != want {
- t.Errorf("Organizations.IsMember returned %+v, want %+v", member, want)
- }
-}
-
-// ensure that a 404 response is interpreted as "false" and not an error
-func TestOrganizationsService_IsMember_notMember(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/orgs/o/members/u", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- w.WriteHeader(http.StatusNotFound)
- })
-
- member, _, err := client.Organizations.IsMember("o", "u")
- if err != nil {
- t.Errorf("Organizations.IsMember returned error: %+v", err)
- }
- if want := false; member != want {
- t.Errorf("Organizations.IsMember returned %+v, want %+v", member, want)
- }
-}
-
-// ensure that a 400 response is interpreted as an actual error, and not simply
-// as "false" like the above case of a 404
-func TestOrganizationsService_IsMember_error(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/orgs/o/members/u", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- http.Error(w, "BadRequest", http.StatusBadRequest)
- })
-
- member, _, err := client.Organizations.IsMember("o", "u")
- if err == nil {
- t.Errorf("Expected HTTP 400 response")
- }
- if want := false; member != want {
- t.Errorf("Organizations.IsMember returned %+v, want %+v", member, want)
- }
-}
-
-func TestOrganizationsService_IsMember_invalidOrg(t *testing.T) {
- _, _, err := client.Organizations.IsMember("%", "u")
- testURLParseError(t, err)
-}
-
-func TestOrganizationsService_IsPublicMember(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/orgs/o/public_members/u", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- w.WriteHeader(http.StatusNoContent)
- })
-
- member, _, err := client.Organizations.IsPublicMember("o", "u")
- if err != nil {
- t.Errorf("Organizations.IsPublicMember returned error: %v", err)
- }
- if want := true; member != want {
- t.Errorf("Organizations.IsPublicMember returned %+v, want %+v", member, want)
- }
-}
-
-// ensure that a 404 response is interpreted as "false" and not an error
-func TestOrganizationsService_IsPublicMember_notMember(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/orgs/o/public_members/u", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- w.WriteHeader(http.StatusNotFound)
- })
-
- member, _, err := client.Organizations.IsPublicMember("o", "u")
- if err != nil {
- t.Errorf("Organizations.IsPublicMember returned error: %v", err)
- }
- if want := false; member != want {
- t.Errorf("Organizations.IsPublicMember returned %+v, want %+v", member, want)
- }
-}
-
-// ensure that a 400 response is interpreted as an actual error, and not simply
-// as "false" like the above case of a 404
-func TestOrganizationsService_IsPublicMember_error(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/orgs/o/public_members/u", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- http.Error(w, "BadRequest", http.StatusBadRequest)
- })
-
- member, _, err := client.Organizations.IsPublicMember("o", "u")
- if err == nil {
- t.Errorf("Expected HTTP 400 response")
- }
- if want := false; member != want {
- t.Errorf("Organizations.IsPublicMember returned %+v, want %+v", member, want)
- }
-}
-
-func TestOrganizationsService_IsPublicMember_invalidOrg(t *testing.T) {
- _, _, err := client.Organizations.IsPublicMember("%", "u")
- testURLParseError(t, err)
-}
-
-func TestOrganizationsService_RemoveMember(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/orgs/o/members/u", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "DELETE")
- })
-
- _, err := client.Organizations.RemoveMember("o", "u")
- if err != nil {
- t.Errorf("Organizations.RemoveMember returned error: %v", err)
- }
-}
-
-func TestOrganizationsService_RemoveMember_invalidOrg(t *testing.T) {
- _, err := client.Organizations.RemoveMember("%", "u")
- testURLParseError(t, err)
-}
-
-func TestOrganizationsService_ListOrgMemberships(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/user/memberships/orgs", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testFormValues(t, r, values{
- "state": "active",
- "page": "2",
- })
- fmt.Fprint(w, `[{"url":"u"}]`)
- })
-
- opt := &ListOrgMembershipsOptions{
- State: "active",
- ListOptions: ListOptions{Page: 2},
- }
- memberships, _, err := client.Organizations.ListOrgMemberships(opt)
- if err != nil {
- t.Errorf("Organizations.ListOrgMemberships returned error: %v", err)
- }
-
- want := []*Membership{{URL: String("u")}}
- if !reflect.DeepEqual(memberships, want) {
- t.Errorf("Organizations.ListOrgMemberships returned %+v, want %+v", memberships, want)
- }
-}
-
-func TestOrganizationsService_GetOrgMembership_AuthenticatedUser(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/user/memberships/orgs/o", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- fmt.Fprint(w, `{"url":"u"}`)
- })
-
- membership, _, err := client.Organizations.GetOrgMembership("", "o")
- if err != nil {
- t.Errorf("Organizations.GetOrgMembership returned error: %v", err)
- }
-
- want := &Membership{URL: String("u")}
- if !reflect.DeepEqual(membership, want) {
- t.Errorf("Organizations.GetOrgMembership returned %+v, want %+v", membership, want)
- }
-}
-
-func TestOrganizationsService_GetOrgMembership_SpecifiedUser(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/orgs/o/memberships/u", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- fmt.Fprint(w, `{"url":"u"}`)
- })
-
- membership, _, err := client.Organizations.GetOrgMembership("u", "o")
- if err != nil {
- t.Errorf("Organizations.GetOrgMembership returned error: %v", err)
- }
-
- want := &Membership{URL: String("u")}
- if !reflect.DeepEqual(membership, want) {
- t.Errorf("Organizations.GetOrgMembership returned %+v, want %+v", membership, want)
- }
-}
-
-func TestOrganizationsService_EditOrgMembership_AuthenticatedUser(t *testing.T) {
- setup()
- defer teardown()
-
- input := &Membership{State: String("active")}
-
- mux.HandleFunc("/user/memberships/orgs/o", func(w http.ResponseWriter, r *http.Request) {
- v := new(Membership)
- json.NewDecoder(r.Body).Decode(v)
-
- testMethod(t, r, "PATCH")
- if !reflect.DeepEqual(v, input) {
- t.Errorf("Request body = %+v, want %+v", v, input)
- }
-
- fmt.Fprint(w, `{"url":"u"}`)
- })
-
- membership, _, err := client.Organizations.EditOrgMembership("", "o", input)
- if err != nil {
- t.Errorf("Organizations.EditOrgMembership returned error: %v", err)
- }
-
- want := &Membership{URL: String("u")}
- if !reflect.DeepEqual(membership, want) {
- t.Errorf("Organizations.EditOrgMembership returned %+v, want %+v", membership, want)
- }
-}
-
-func TestOrganizationsService_EditOrgMembership_SpecifiedUser(t *testing.T) {
- setup()
- defer teardown()
-
- input := &Membership{State: String("active")}
-
- mux.HandleFunc("/orgs/o/memberships/u", func(w http.ResponseWriter, r *http.Request) {
- v := new(Membership)
- json.NewDecoder(r.Body).Decode(v)
-
- testMethod(t, r, "PUT")
- if !reflect.DeepEqual(v, input) {
- t.Errorf("Request body = %+v, want %+v", v, input)
- }
-
- fmt.Fprint(w, `{"url":"u"}`)
- })
-
- membership, _, err := client.Organizations.EditOrgMembership("u", "o", input)
- if err != nil {
- t.Errorf("Organizations.EditOrgMembership returned error: %v", err)
- }
-
- want := &Membership{URL: String("u")}
- if !reflect.DeepEqual(membership, want) {
- t.Errorf("Organizations.EditOrgMembership returned %+v, want %+v", membership, want)
- }
-}
-
-func TestOrganizationsService_RemoveOrgMembership(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/orgs/o/memberships/u", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "DELETE")
- w.WriteHeader(http.StatusNoContent)
- })
-
- _, err := client.Organizations.RemoveOrgMembership("u", "o")
- if err != nil {
- t.Errorf("Organizations.RemoveOrgMembership returned error: %v", err)
- }
-}
diff --git a/vendor/src/github.com/google/go-github/github/orgs_teams.go b/vendor/src/github.com/google/go-github/github/orgs_teams.go
deleted file mode 100644
index 8e8550c..0000000
--- a/vendor/src/github.com/google/go-github/github/orgs_teams.go
+++ /dev/null
@@ -1,379 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import "fmt"
-
-// Team represents a team within a GitHub organization. Teams are used to
-// manage access to an organization's repositories.
-type Team struct {
- ID *int `json:"id,omitempty"`
- Name *string `json:"name,omitempty"`
- Description *string `json:"description,omitempty"`
- URL *string `json:"url,omitempty"`
- Slug *string `json:"slug,omitempty"`
-
- // Permission is deprecated when creating or editing a team in an org
- // using the new GitHub permission model. It no longer identifies the
- // permission a team has on its repos, but only specifies the default
- // permission a repo is initially added with. Avoid confusion by
- // specifying a permission value when calling AddTeamRepo.
- Permission *string `json:"permission,omitempty"`
-
- // Privacy identifies the level of privacy this team should have.
- // Possible values are:
- // secret - only visible to organization owners and members of this team
- // closed - visible to all members of this organization
- // Default is "secret".
- Privacy *string `json:"privacy,omitempty"`
-
- MembersCount *int `json:"members_count,omitempty"`
- ReposCount *int `json:"repos_count,omitempty"`
- Organization *Organization `json:"organization,omitempty"`
- MembersURL *string `json:"members_url,omitempty"`
- RepositoriesURL *string `json:"repositories_url,omitempty"`
-}
-
-func (t Team) String() string {
- return Stringify(t)
-}
-
-// ListTeams lists all of the teams for an organization.
-//
-// GitHub API docs: http://developer.github.com/v3/orgs/teams/#list-teams
-func (s *OrganizationsService) ListTeams(org string, opt *ListOptions) ([]*Team, *Response, error) {
- u := fmt.Sprintf("orgs/%v/teams", org)
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- teams := new([]*Team)
- resp, err := s.client.Do(req, teams)
- if err != nil {
- return nil, resp, err
- }
-
- return *teams, resp, err
-}
-
-// GetTeam fetches a team by ID.
-//
-// GitHub API docs: http://developer.github.com/v3/orgs/teams/#get-team
-func (s *OrganizationsService) GetTeam(team int) (*Team, *Response, error) {
- u := fmt.Sprintf("teams/%v", team)
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- t := new(Team)
- resp, err := s.client.Do(req, t)
- if err != nil {
- return nil, resp, err
- }
-
- return t, resp, err
-}
-
-// CreateTeam creates a new team within an organization.
-//
-// GitHub API docs: http://developer.github.com/v3/orgs/teams/#create-team
-func (s *OrganizationsService) CreateTeam(org string, team *Team) (*Team, *Response, error) {
- u := fmt.Sprintf("orgs/%v/teams", org)
- req, err := s.client.NewRequest("POST", u, team)
- if err != nil {
- return nil, nil, err
- }
-
- t := new(Team)
- resp, err := s.client.Do(req, t)
- if err != nil {
- return nil, resp, err
- }
-
- return t, resp, err
-}
-
-// EditTeam edits a team.
-//
-// GitHub API docs: http://developer.github.com/v3/orgs/teams/#edit-team
-func (s *OrganizationsService) EditTeam(id int, team *Team) (*Team, *Response, error) {
- u := fmt.Sprintf("teams/%v", id)
- req, err := s.client.NewRequest("PATCH", u, team)
- if err != nil {
- return nil, nil, err
- }
-
- t := new(Team)
- resp, err := s.client.Do(req, t)
- if err != nil {
- return nil, resp, err
- }
-
- return t, resp, err
-}
-
-// DeleteTeam deletes a team.
-//
-// GitHub API docs: http://developer.github.com/v3/orgs/teams/#delete-team
-func (s *OrganizationsService) DeleteTeam(team int) (*Response, error) {
- u := fmt.Sprintf("teams/%v", team)
- req, err := s.client.NewRequest("DELETE", u, nil)
- if err != nil {
- return nil, err
- }
-
- return s.client.Do(req, nil)
-}
-
-// OrganizationListTeamMembersOptions specifies the optional parameters to the
-// OrganizationsService.ListTeamMembers method.
-type OrganizationListTeamMembersOptions struct {
- // Role filters members returned by their role in the team. Possible
- // values are "all", "member", "maintainer". Default is "all".
- Role string `url:"role,omitempty"`
-
- ListOptions
-}
-
-// ListTeamMembers lists all of the users who are members of the specified
-// team.
-//
-// GitHub API docs: http://developer.github.com/v3/orgs/teams/#list-team-members
-func (s *OrganizationsService) ListTeamMembers(team int, opt *OrganizationListTeamMembersOptions) ([]*User, *Response, error) {
- u := fmt.Sprintf("teams/%v/members", team)
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- members := new([]*User)
- resp, err := s.client.Do(req, members)
- if err != nil {
- return nil, resp, err
- }
-
- return *members, resp, err
-}
-
-// IsTeamMember checks if a user is a member of the specified team.
-//
-// GitHub API docs: http://developer.github.com/v3/orgs/teams/#get-team-member
-func (s *OrganizationsService) IsTeamMember(team int, user string) (bool, *Response, error) {
- u := fmt.Sprintf("teams/%v/members/%v", team, user)
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return false, nil, err
- }
-
- resp, err := s.client.Do(req, nil)
- member, err := parseBoolResponse(err)
- return member, resp, err
-}
-
-// ListTeamRepos lists the repositories that the specified team has access to.
-//
-// GitHub API docs: http://developer.github.com/v3/orgs/teams/#list-team-repos
-func (s *OrganizationsService) ListTeamRepos(team int, opt *ListOptions) ([]*Repository, *Response, error) {
- u := fmt.Sprintf("teams/%v/repos", team)
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- repos := new([]*Repository)
- resp, err := s.client.Do(req, repos)
- if err != nil {
- return nil, resp, err
- }
-
- return *repos, resp, err
-}
-
-// IsTeamRepo checks if a team manages the specified repository. If the
-// repository is managed by team, a Repository is returned which includes the
-// permissions team has for that repo.
-//
-// GitHub API docs: https://developer.github.com/v3/orgs/teams/#check-if-a-team-manages-a-repository
-func (s *OrganizationsService) IsTeamRepo(team int, owner string, repo string) (*Repository, *Response, error) {
- u := fmt.Sprintf("teams/%v/repos/%v/%v", team, owner, repo)
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- req.Header.Set("Accept", mediaTypeOrgPermissionRepo)
-
- repository := new(Repository)
- resp, err := s.client.Do(req, repository)
- if err != nil {
- return nil, resp, err
- }
-
- return repository, resp, err
-}
-
-// OrganizationAddTeamRepoOptions specifies the optional parameters to the
-// OrganizationsService.AddTeamRepo method.
-type OrganizationAddTeamRepoOptions struct {
- // Permission specifies the permission to grant the team on this repository.
- // Possible values are:
- // pull - team members can pull, but not push to or administer this repository
- // push - team members can pull and push, but not administer this repository
- // admin - team members can pull, push and administer this repository
- //
- // If not specified, the team's permission attribute will be used.
- Permission string `json:"permission,omitempty"`
-}
-
-// AddTeamRepo adds a repository to be managed by the specified team. The
-// specified repository must be owned by the organization to which the team
-// belongs, or a direct fork of a repository owned by the organization.
-//
-// GitHub API docs: http://developer.github.com/v3/orgs/teams/#add-team-repo
-func (s *OrganizationsService) AddTeamRepo(team int, owner string, repo string, opt *OrganizationAddTeamRepoOptions) (*Response, error) {
- u := fmt.Sprintf("teams/%v/repos/%v/%v", team, owner, repo)
- req, err := s.client.NewRequest("PUT", u, opt)
- if err != nil {
- return nil, err
- }
-
- return s.client.Do(req, nil)
-}
-
-// RemoveTeamRepo removes a repository from being managed by the specified
-// team. Note that this does not delete the repository, it just removes it
-// from the team.
-//
-// GitHub API docs: http://developer.github.com/v3/orgs/teams/#remove-team-repo
-func (s *OrganizationsService) RemoveTeamRepo(team int, owner string, repo string) (*Response, error) {
- u := fmt.Sprintf("teams/%v/repos/%v/%v", team, owner, repo)
- req, err := s.client.NewRequest("DELETE", u, nil)
- if err != nil {
- return nil, err
- }
-
- return s.client.Do(req, nil)
-}
-
-// ListUserTeams lists a user's teams
-// GitHub API docs: https://developer.github.com/v3/orgs/teams/#list-user-teams
-func (s *OrganizationsService) ListUserTeams(opt *ListOptions) ([]*Team, *Response, error) {
- u := "user/teams"
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- teams := new([]*Team)
- resp, err := s.client.Do(req, teams)
- if err != nil {
- return nil, resp, err
- }
-
- return *teams, resp, err
-}
-
-// GetTeamMembership returns the membership status for a user in a team.
-//
-// GitHub API docs: https://developer.github.com/v3/orgs/teams/#get-team-membership
-func (s *OrganizationsService) GetTeamMembership(team int, user string) (*Membership, *Response, error) {
- u := fmt.Sprintf("teams/%v/memberships/%v", team, user)
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- t := new(Membership)
- resp, err := s.client.Do(req, t)
- if err != nil {
- return nil, resp, err
- }
-
- return t, resp, err
-}
-
-// OrganizationAddTeamMembershipOptions does stuff specifies the optional
-// parameters to the OrganizationsService.AddTeamMembership method.
-type OrganizationAddTeamMembershipOptions struct {
- // Role specifies the role the user should have in the team. Possible
- // values are:
- // member - a normal member of the team
- // maintainer - a team maintainer. Able to add/remove other team
- // members, promote other team members to team
- // maintainer, and edit the team’s name and description
- //
- // Default value is "member".
- Role string `json:"role,omitempty"`
-}
-
-// AddTeamMembership adds or invites a user to a team.
-//
-// In order to add a membership between a user and a team, the authenticated
-// user must have 'admin' permissions to the team or be an owner of the
-// organization that the team is associated with.
-//
-// If the user is already a part of the team's organization (meaning they're on
-// at least one other team in the organization), this endpoint will add the
-// user to the team.
-//
-// If the user is completely unaffiliated with the team's organization (meaning
-// they're on none of the organization's teams), this endpoint will send an
-// invitation to the user via email. This newly-created membership will be in
-// the "pending" state until the user accepts the invitation, at which point
-// the membership will transition to the "active" state and the user will be
-// added as a member of the team.
-//
-// GitHub API docs: https://developer.github.com/v3/orgs/teams/#add-team-membership
-func (s *OrganizationsService) AddTeamMembership(team int, user string, opt *OrganizationAddTeamMembershipOptions) (*Membership, *Response, error) {
- u := fmt.Sprintf("teams/%v/memberships/%v", team, user)
- req, err := s.client.NewRequest("PUT", u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- t := new(Membership)
- resp, err := s.client.Do(req, t)
- if err != nil {
- return nil, resp, err
- }
-
- return t, resp, err
-}
-
-// RemoveTeamMembership removes a user from a team.
-//
-// GitHub API docs: https://developer.github.com/v3/orgs/teams/#remove-team-membership
-func (s *OrganizationsService) RemoveTeamMembership(team int, user string) (*Response, error) {
- u := fmt.Sprintf("teams/%v/memberships/%v", team, user)
- req, err := s.client.NewRequest("DELETE", u, nil)
- if err != nil {
- return nil, err
- }
-
- return s.client.Do(req, nil)
-}
diff --git a/vendor/src/github.com/google/go-github/github/orgs_teams_test.go b/vendor/src/github.com/google/go-github/github/orgs_teams_test.go
deleted file mode 100644
index 4dec123..0000000
--- a/vendor/src/github.com/google/go-github/github/orgs_teams_test.go
+++ /dev/null
@@ -1,501 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "encoding/json"
- "fmt"
- "net/http"
- "reflect"
- "testing"
-)
-
-func TestOrganizationsService_ListTeams(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/orgs/o/teams", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testFormValues(t, r, values{"page": "2"})
- fmt.Fprint(w, `[{"id":1}]`)
- })
-
- opt := &ListOptions{Page: 2}
- teams, _, err := client.Organizations.ListTeams("o", opt)
- if err != nil {
- t.Errorf("Organizations.ListTeams returned error: %v", err)
- }
-
- want := []*Team{{ID: Int(1)}}
- if !reflect.DeepEqual(teams, want) {
- t.Errorf("Organizations.ListTeams returned %+v, want %+v", teams, want)
- }
-}
-
-func TestOrganizationsService_ListTeams_invalidOrg(t *testing.T) {
- _, _, err := client.Organizations.ListTeams("%", nil)
- testURLParseError(t, err)
-}
-
-func TestOrganizationsService_GetTeam(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/teams/1", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- fmt.Fprint(w, `{"id":1, "name":"n", "description": "d", "url":"u", "slug": "s", "permission":"p"}`)
- })
-
- team, _, err := client.Organizations.GetTeam(1)
- if err != nil {
- t.Errorf("Organizations.GetTeam returned error: %v", err)
- }
-
- want := &Team{ID: Int(1), Name: String("n"), Description: String("d"), URL: String("u"), Slug: String("s"), Permission: String("p")}
- if !reflect.DeepEqual(team, want) {
- t.Errorf("Organizations.GetTeam returned %+v, want %+v", team, want)
- }
-}
-
-func TestOrganizationsService_CreateTeam(t *testing.T) {
- setup()
- defer teardown()
-
- input := &Team{Name: String("n"), Privacy: String("closed")}
-
- mux.HandleFunc("/orgs/o/teams", func(w http.ResponseWriter, r *http.Request) {
- v := new(Team)
- json.NewDecoder(r.Body).Decode(v)
-
- testMethod(t, r, "POST")
- if !reflect.DeepEqual(v, input) {
- t.Errorf("Request body = %+v, want %+v", v, input)
- }
-
- fmt.Fprint(w, `{"id":1}`)
- })
-
- team, _, err := client.Organizations.CreateTeam("o", input)
- if err != nil {
- t.Errorf("Organizations.CreateTeam returned error: %v", err)
- }
-
- want := &Team{ID: Int(1)}
- if !reflect.DeepEqual(team, want) {
- t.Errorf("Organizations.CreateTeam returned %+v, want %+v", team, want)
- }
-}
-
-func TestOrganizationsService_CreateTeam_invalidOrg(t *testing.T) {
- _, _, err := client.Organizations.CreateTeam("%", nil)
- testURLParseError(t, err)
-}
-
-func TestOrganizationsService_EditTeam(t *testing.T) {
- setup()
- defer teardown()
-
- input := &Team{Name: String("n"), Privacy: String("closed")}
-
- mux.HandleFunc("/teams/1", func(w http.ResponseWriter, r *http.Request) {
- v := new(Team)
- json.NewDecoder(r.Body).Decode(v)
-
- testMethod(t, r, "PATCH")
- if !reflect.DeepEqual(v, input) {
- t.Errorf("Request body = %+v, want %+v", v, input)
- }
-
- fmt.Fprint(w, `{"id":1}`)
- })
-
- team, _, err := client.Organizations.EditTeam(1, input)
- if err != nil {
- t.Errorf("Organizations.EditTeam returned error: %v", err)
- }
-
- want := &Team{ID: Int(1)}
- if !reflect.DeepEqual(team, want) {
- t.Errorf("Organizations.EditTeam returned %+v, want %+v", team, want)
- }
-}
-
-func TestOrganizationsService_DeleteTeam(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/teams/1", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "DELETE")
- })
-
- _, err := client.Organizations.DeleteTeam(1)
- if err != nil {
- t.Errorf("Organizations.DeleteTeam returned error: %v", err)
- }
-}
-
-func TestOrganizationsService_ListTeamMembers(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/teams/1/members", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testFormValues(t, r, values{"role": "member", "page": "2"})
- fmt.Fprint(w, `[{"id":1}]`)
- })
-
- opt := &OrganizationListTeamMembersOptions{Role: "member", ListOptions: ListOptions{Page: 2}}
- members, _, err := client.Organizations.ListTeamMembers(1, opt)
- if err != nil {
- t.Errorf("Organizations.ListTeamMembers returned error: %v", err)
- }
-
- want := []*User{{ID: Int(1)}}
- if !reflect.DeepEqual(members, want) {
- t.Errorf("Organizations.ListTeamMembers returned %+v, want %+v", members, want)
- }
-}
-
-func TestOrganizationsService_IsTeamMember_true(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/teams/1/members/u", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- })
-
- member, _, err := client.Organizations.IsTeamMember(1, "u")
- if err != nil {
- t.Errorf("Organizations.IsTeamMember returned error: %v", err)
- }
- if want := true; member != want {
- t.Errorf("Organizations.IsTeamMember returned %+v, want %+v", member, want)
- }
-}
-
-// ensure that a 404 response is interpreted as "false" and not an error
-func TestOrganizationsService_IsTeamMember_false(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/teams/1/members/u", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- w.WriteHeader(http.StatusNotFound)
- })
-
- member, _, err := client.Organizations.IsTeamMember(1, "u")
- if err != nil {
- t.Errorf("Organizations.IsTeamMember returned error: %+v", err)
- }
- if want := false; member != want {
- t.Errorf("Organizations.IsTeamMember returned %+v, want %+v", member, want)
- }
-}
-
-// ensure that a 400 response is interpreted as an actual error, and not simply
-// as "false" like the above case of a 404
-func TestOrganizationsService_IsTeamMember_error(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/teams/1/members/u", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- http.Error(w, "BadRequest", http.StatusBadRequest)
- })
-
- member, _, err := client.Organizations.IsTeamMember(1, "u")
- if err == nil {
- t.Errorf("Expected HTTP 400 response")
- }
- if want := false; member != want {
- t.Errorf("Organizations.IsTeamMember returned %+v, want %+v", member, want)
- }
-}
-
-func TestOrganizationsService_IsTeamMember_invalidUser(t *testing.T) {
- _, _, err := client.Organizations.IsTeamMember(1, "%")
- testURLParseError(t, err)
-}
-
-func TestOrganizationsService_PublicizeMembership(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/orgs/o/public_members/u", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "PUT")
- w.WriteHeader(http.StatusNoContent)
- })
-
- _, err := client.Organizations.PublicizeMembership("o", "u")
- if err != nil {
- t.Errorf("Organizations.PublicizeMembership returned error: %v", err)
- }
-}
-
-func TestOrganizationsService_PublicizeMembership_invalidOrg(t *testing.T) {
- _, err := client.Organizations.PublicizeMembership("%", "u")
- testURLParseError(t, err)
-}
-
-func TestOrganizationsService_ConcealMembership(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/orgs/o/public_members/u", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "DELETE")
- w.WriteHeader(http.StatusNoContent)
- })
-
- _, err := client.Organizations.ConcealMembership("o", "u")
- if err != nil {
- t.Errorf("Organizations.ConcealMembership returned error: %v", err)
- }
-}
-
-func TestOrganizationsService_ConcealMembership_invalidOrg(t *testing.T) {
- _, err := client.Organizations.ConcealMembership("%", "u")
- testURLParseError(t, err)
-}
-
-func TestOrganizationsService_ListTeamRepos(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/teams/1/repos", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testFormValues(t, r, values{"page": "2"})
- fmt.Fprint(w, `[{"id":1}]`)
- })
-
- opt := &ListOptions{Page: 2}
- members, _, err := client.Organizations.ListTeamRepos(1, opt)
- if err != nil {
- t.Errorf("Organizations.ListTeamRepos returned error: %v", err)
- }
-
- want := []*Repository{{ID: Int(1)}}
- if !reflect.DeepEqual(members, want) {
- t.Errorf("Organizations.ListTeamRepos returned %+v, want %+v", members, want)
- }
-}
-
-func TestOrganizationsService_IsTeamRepo_true(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/teams/1/repos/o/r", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testHeader(t, r, "Accept", mediaTypeOrgPermissionRepo)
- fmt.Fprint(w, `{"id":1}`)
- })
-
- repo, _, err := client.Organizations.IsTeamRepo(1, "o", "r")
- if err != nil {
- t.Errorf("Organizations.IsTeamRepo returned error: %v", err)
- }
-
- want := &Repository{ID: Int(1)}
- if !reflect.DeepEqual(repo, want) {
- t.Errorf("Organizations.IsTeamRepo returned %+v, want %+v", repo, want)
- }
-}
-
-func TestOrganizationsService_IsTeamRepo_false(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/teams/1/repos/o/r", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- w.WriteHeader(http.StatusNotFound)
- })
-
- repo, resp, err := client.Organizations.IsTeamRepo(1, "o", "r")
- if err == nil {
- t.Errorf("Expected HTTP 404 response")
- }
- if got, want := resp.Response.StatusCode, http.StatusNotFound; got != want {
- t.Errorf("Organizations.IsTeamRepo returned status %d, want %d", got, want)
- }
- if repo != nil {
- t.Errorf("Organizations.IsTeamRepo returned %+v, want nil", repo)
- }
-}
-
-func TestOrganizationsService_IsTeamRepo_error(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/teams/1/repos/o/r", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- http.Error(w, "BadRequest", http.StatusBadRequest)
- })
-
- repo, resp, err := client.Organizations.IsTeamRepo(1, "o", "r")
- if err == nil {
- t.Errorf("Expected HTTP 400 response")
- }
- if got, want := resp.Response.StatusCode, http.StatusBadRequest; got != want {
- t.Errorf("Organizations.IsTeamRepo returned status %d, want %d", got, want)
- }
- if repo != nil {
- t.Errorf("Organizations.IsTeamRepo returned %+v, want nil", repo)
- }
-}
-
-func TestOrganizationsService_IsTeamRepo_invalidOwner(t *testing.T) {
- _, _, err := client.Organizations.IsTeamRepo(1, "%", "r")
- testURLParseError(t, err)
-}
-
-func TestOrganizationsService_AddTeamRepo(t *testing.T) {
- setup()
- defer teardown()
-
- opt := &OrganizationAddTeamRepoOptions{Permission: "admin"}
-
- mux.HandleFunc("/teams/1/repos/o/r", func(w http.ResponseWriter, r *http.Request) {
- v := new(OrganizationAddTeamRepoOptions)
- json.NewDecoder(r.Body).Decode(v)
-
- testMethod(t, r, "PUT")
- if !reflect.DeepEqual(v, opt) {
- t.Errorf("Request body = %+v, want %+v", v, opt)
- }
-
- w.WriteHeader(http.StatusNoContent)
- })
-
- _, err := client.Organizations.AddTeamRepo(1, "o", "r", opt)
- if err != nil {
- t.Errorf("Organizations.AddTeamRepo returned error: %v", err)
- }
-}
-
-func TestOrganizationsService_AddTeamRepo_noAccess(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/teams/1/repos/o/r", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "PUT")
- w.WriteHeader(StatusUnprocessableEntity)
- })
-
- _, err := client.Organizations.AddTeamRepo(1, "o", "r", nil)
- if err == nil {
- t.Errorf("Expcted error to be returned")
- }
-}
-
-func TestOrganizationsService_AddTeamRepo_invalidOwner(t *testing.T) {
- _, err := client.Organizations.AddTeamRepo(1, "%", "r", nil)
- testURLParseError(t, err)
-}
-
-func TestOrganizationsService_RemoveTeamRepo(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/teams/1/repos/o/r", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "DELETE")
- w.WriteHeader(http.StatusNoContent)
- })
-
- _, err := client.Organizations.RemoveTeamRepo(1, "o", "r")
- if err != nil {
- t.Errorf("Organizations.RemoveTeamRepo returned error: %v", err)
- }
-}
-
-func TestOrganizationsService_RemoveTeamRepo_invalidOwner(t *testing.T) {
- _, err := client.Organizations.RemoveTeamRepo(1, "%", "r")
- testURLParseError(t, err)
-}
-
-func TestOrganizationsService_GetTeamMembership(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/teams/1/memberships/u", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- fmt.Fprint(w, `{"url":"u", "state":"active"}`)
- })
-
- membership, _, err := client.Organizations.GetTeamMembership(1, "u")
- if err != nil {
- t.Errorf("Organizations.GetTeamMembership returned error: %v", err)
- }
-
- want := &Membership{URL: String("u"), State: String("active")}
- if !reflect.DeepEqual(membership, want) {
- t.Errorf("Organizations.GetTeamMembership returned %+v, want %+v", membership, want)
- }
-}
-
-func TestOrganizationsService_AddTeamMembership(t *testing.T) {
- setup()
- defer teardown()
-
- opt := &OrganizationAddTeamMembershipOptions{Role: "maintainer"}
-
- mux.HandleFunc("/teams/1/memberships/u", func(w http.ResponseWriter, r *http.Request) {
- v := new(OrganizationAddTeamMembershipOptions)
- json.NewDecoder(r.Body).Decode(v)
-
- testMethod(t, r, "PUT")
- if !reflect.DeepEqual(v, opt) {
- t.Errorf("Request body = %+v, want %+v", v, opt)
- }
-
- fmt.Fprint(w, `{"url":"u", "state":"pending"}`)
- })
-
- membership, _, err := client.Organizations.AddTeamMembership(1, "u", opt)
- if err != nil {
- t.Errorf("Organizations.AddTeamMembership returned error: %v", err)
- }
-
- want := &Membership{URL: String("u"), State: String("pending")}
- if !reflect.DeepEqual(membership, want) {
- t.Errorf("Organizations.AddTeamMembership returned %+v, want %+v", membership, want)
- }
-}
-
-func TestOrganizationsService_RemoveTeamMembership(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/teams/1/memberships/u", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "DELETE")
- w.WriteHeader(http.StatusNoContent)
- })
-
- _, err := client.Organizations.RemoveTeamMembership(1, "u")
- if err != nil {
- t.Errorf("Organizations.RemoveTeamMembership returned error: %v", err)
- }
-}
-
-func TestOrganizationsService_ListUserTeams(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/user/teams", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testFormValues(t, r, values{"page": "1"})
- fmt.Fprint(w, `[{"id":1}]`)
- })
-
- opt := &ListOptions{Page: 1}
- teams, _, err := client.Organizations.ListUserTeams(opt)
- if err != nil {
- t.Errorf("Organizations.ListUserTeams returned error: %v", err)
- }
-
- want := []*Team{{ID: Int(1)}}
- if !reflect.DeepEqual(teams, want) {
- t.Errorf("Organizations.ListUserTeams returned %+v, want %+v", teams, want)
- }
-}
diff --git a/vendor/src/github.com/google/go-github/github/orgs_test.go b/vendor/src/github.com/google/go-github/github/orgs_test.go
deleted file mode 100644
index 8e02619..0000000
--- a/vendor/src/github.com/google/go-github/github/orgs_test.go
+++ /dev/null
@@ -1,143 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "encoding/json"
- "fmt"
- "net/http"
- "reflect"
- "testing"
-)
-
-func TestOrganizationsService_ListAll(t *testing.T) {
- setup()
- defer teardown()
-
- since := 1342004
- mux.HandleFunc("/organizations", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testFormValues(t, r, values{"since": "1342004"})
- fmt.Fprint(w, `[{"id":4314092}]`)
- })
-
- opt := &OrganizationsListOptions{Since: since}
- orgs, _, err := client.Organizations.ListAll(opt)
- if err != nil {
- t.Errorf("Organizations.ListAll returned error: %v", err)
- }
-
- want := []*Organization{{ID: Int(4314092)}}
- if !reflect.DeepEqual(orgs, want) {
- t.Errorf("Organizations.ListAll returned %+v, want %+v", orgs, want)
- }
-}
-
-func TestOrganizationsService_List_authenticatedUser(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/user/orgs", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- fmt.Fprint(w, `[{"id":1},{"id":2}]`)
- })
-
- orgs, _, err := client.Organizations.List("", nil)
- if err != nil {
- t.Errorf("Organizations.List returned error: %v", err)
- }
-
- want := []*Organization{{ID: Int(1)}, {ID: Int(2)}}
- if !reflect.DeepEqual(orgs, want) {
- t.Errorf("Organizations.List returned %+v, want %+v", orgs, want)
- }
-}
-
-func TestOrganizationsService_List_specifiedUser(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/users/u/orgs", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testFormValues(t, r, values{"page": "2"})
- fmt.Fprint(w, `[{"id":1},{"id":2}]`)
- })
-
- opt := &ListOptions{Page: 2}
- orgs, _, err := client.Organizations.List("u", opt)
- if err != nil {
- t.Errorf("Organizations.List returned error: %v", err)
- }
-
- want := []*Organization{{ID: Int(1)}, {ID: Int(2)}}
- if !reflect.DeepEqual(orgs, want) {
- t.Errorf("Organizations.List returned %+v, want %+v", orgs, want)
- }
-}
-
-func TestOrganizationsService_List_invalidUser(t *testing.T) {
- _, _, err := client.Organizations.List("%", nil)
- testURLParseError(t, err)
-}
-
-func TestOrganizationsService_Get(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/orgs/o", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- fmt.Fprint(w, `{"id":1, "login":"l", "url":"u", "avatar_url": "a", "location":"l"}`)
- })
-
- org, _, err := client.Organizations.Get("o")
- if err != nil {
- t.Errorf("Organizations.Get returned error: %v", err)
- }
-
- want := &Organization{ID: Int(1), Login: String("l"), URL: String("u"), AvatarURL: String("a"), Location: String("l")}
- if !reflect.DeepEqual(org, want) {
- t.Errorf("Organizations.Get returned %+v, want %+v", org, want)
- }
-}
-
-func TestOrganizationsService_Get_invalidOrg(t *testing.T) {
- _, _, err := client.Organizations.Get("%")
- testURLParseError(t, err)
-}
-
-func TestOrganizationsService_Edit(t *testing.T) {
- setup()
- defer teardown()
-
- input := &Organization{Login: String("l")}
-
- mux.HandleFunc("/orgs/o", func(w http.ResponseWriter, r *http.Request) {
- v := new(Organization)
- json.NewDecoder(r.Body).Decode(v)
-
- testMethod(t, r, "PATCH")
- if !reflect.DeepEqual(v, input) {
- t.Errorf("Request body = %+v, want %+v", v, input)
- }
-
- fmt.Fprint(w, `{"id":1}`)
- })
-
- org, _, err := client.Organizations.Edit("o", input)
- if err != nil {
- t.Errorf("Organizations.Edit returned error: %v", err)
- }
-
- want := &Organization{ID: Int(1)}
- if !reflect.DeepEqual(org, want) {
- t.Errorf("Organizations.Edit returned %+v, want %+v", org, want)
- }
-}
-
-func TestOrganizationsService_Edit_invalidOrg(t *testing.T) {
- _, _, err := client.Organizations.Edit("%", nil)
- testURLParseError(t, err)
-}
diff --git a/vendor/src/github.com/google/go-github/github/pulls.go b/vendor/src/github.com/google/go-github/github/pulls.go
deleted file mode 100644
index 0900766..0000000
--- a/vendor/src/github.com/google/go-github/github/pulls.go
+++ /dev/null
@@ -1,285 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "fmt"
- "time"
-)
-
-// PullRequestsService handles communication with the pull request related
-// methods of the GitHub API.
-//
-// GitHub API docs: http://developer.github.com/v3/pulls/
-type PullRequestsService service
-
-// PullRequest represents a GitHub pull request on a repository.
-type PullRequest struct {
- ID *int `json:"id,omitempty"`
- Number *int `json:"number,omitempty"`
- State *string `json:"state,omitempty"`
- Title *string `json:"title,omitempty"`
- Body *string `json:"body,omitempty"`
- CreatedAt *time.Time `json:"created_at,omitempty"`
- UpdatedAt *time.Time `json:"updated_at,omitempty"`
- ClosedAt *time.Time `json:"closed_at,omitempty"`
- MergedAt *time.Time `json:"merged_at,omitempty"`
- User *User `json:"user,omitempty"`
- Merged *bool `json:"merged,omitempty"`
- Mergeable *bool `json:"mergeable,omitempty"`
- MergedBy *User `json:"merged_by,omitempty"`
- Comments *int `json:"comments,omitempty"`
- Commits *int `json:"commits,omitempty"`
- Additions *int `json:"additions,omitempty"`
- Deletions *int `json:"deletions,omitempty"`
- ChangedFiles *int `json:"changed_files,omitempty"`
- URL *string `json:"url,omitempty"`
- HTMLURL *string `json:"html_url,omitempty"`
- IssueURL *string `json:"issue_url,omitempty"`
- StatusesURL *string `json:"statuses_url,omitempty"`
- DiffURL *string `json:"diff_url,omitempty"`
- PatchURL *string `json:"patch_url,omitempty"`
- Assignee *User `json:"assignee,omitempty"` // probably only in webhooks
-
- Head *PullRequestBranch `json:"head,omitempty"`
- Base *PullRequestBranch `json:"base,omitempty"`
-}
-
-func (p PullRequest) String() string {
- return Stringify(p)
-}
-
-// PullRequestBranch represents a base or head branch in a GitHub pull request.
-type PullRequestBranch struct {
- Label *string `json:"label,omitempty"`
- Ref *string `json:"ref,omitempty"`
- SHA *string `json:"sha,omitempty"`
- Repo *Repository `json:"repo,omitempty"`
- User *User `json:"user,omitempty"`
-}
-
-// PullRequestListOptions specifies the optional parameters to the
-// PullRequestsService.List method.
-type PullRequestListOptions struct {
- // State filters pull requests based on their state. Possible values are:
- // open, closed. Default is "open".
- State string `url:"state,omitempty"`
-
- // Head filters pull requests by head user and branch name in the format of:
- // "user:ref-name".
- Head string `url:"head,omitempty"`
-
- // Base filters pull requests by base branch name.
- Base string `url:"base,omitempty"`
-
- // Sort specifies how to sort pull requests. Possible values are: created,
- // updated, popularity, long-running. Default is "created".
- Sort string `url:"sort,omitempty"`
-
- // Direction in which to sort pull requests. Possible values are: asc, desc.
- // If Sort is "created" or not specified, Default is "desc", otherwise Default
- // is "asc"
- Direction string `url:"direction,omitempty"`
-
- ListOptions
-}
-
-// List the pull requests for the specified repository.
-//
-// GitHub API docs: http://developer.github.com/v3/pulls/#list-pull-requests
-func (s *PullRequestsService) List(owner string, repo string, opt *PullRequestListOptions) ([]*PullRequest, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/pulls", owner, repo)
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- pulls := new([]*PullRequest)
- resp, err := s.client.Do(req, pulls)
- if err != nil {
- return nil, resp, err
- }
-
- return *pulls, resp, err
-}
-
-// Get a single pull request.
-//
-// GitHub API docs: https://developer.github.com/v3/pulls/#get-a-single-pull-request
-func (s *PullRequestsService) Get(owner string, repo string, number int) (*PullRequest, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/pulls/%d", owner, repo, number)
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- pull := new(PullRequest)
- resp, err := s.client.Do(req, pull)
- if err != nil {
- return nil, resp, err
- }
-
- return pull, resp, err
-}
-
-// NewPullRequest represents a new pull request to be created.
-type NewPullRequest struct {
- Title *string `json:"title,omitempty"`
- Head *string `json:"head,omitempty"`
- Base *string `json:"base,omitempty"`
- Body *string `json:"body,omitempty"`
- Issue *int `json:"issue,omitempty"`
-}
-
-// Create a new pull request on the specified repository.
-//
-// GitHub API docs: https://developer.github.com/v3/pulls/#create-a-pull-request
-func (s *PullRequestsService) Create(owner string, repo string, pull *NewPullRequest) (*PullRequest, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/pulls", owner, repo)
- req, err := s.client.NewRequest("POST", u, pull)
- if err != nil {
- return nil, nil, err
- }
-
- p := new(PullRequest)
- resp, err := s.client.Do(req, p)
- if err != nil {
- return nil, resp, err
- }
-
- return p, resp, err
-}
-
-// Edit a pull request.
-//
-// GitHub API docs: https://developer.github.com/v3/pulls/#update-a-pull-request
-func (s *PullRequestsService) Edit(owner string, repo string, number int, pull *PullRequest) (*PullRequest, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/pulls/%d", owner, repo, number)
- req, err := s.client.NewRequest("PATCH", u, pull)
- if err != nil {
- return nil, nil, err
- }
-
- p := new(PullRequest)
- resp, err := s.client.Do(req, p)
- if err != nil {
- return nil, resp, err
- }
-
- return p, resp, err
-}
-
-// ListCommits lists the commits in a pull request.
-//
-// GitHub API docs: https://developer.github.com/v3/pulls/#list-commits-on-a-pull-request
-func (s *PullRequestsService) ListCommits(owner string, repo string, number int, opt *ListOptions) ([]*RepositoryCommit, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/pulls/%d/commits", owner, repo, number)
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- commits := new([]*RepositoryCommit)
- resp, err := s.client.Do(req, commits)
- if err != nil {
- return nil, resp, err
- }
-
- return *commits, resp, err
-}
-
-// ListFiles lists the files in a pull request.
-//
-// GitHub API docs: https://developer.github.com/v3/pulls/#list-pull-requests-files
-func (s *PullRequestsService) ListFiles(owner string, repo string, number int, opt *ListOptions) ([]*CommitFile, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/pulls/%d/files", owner, repo, number)
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- commitFiles := new([]*CommitFile)
- resp, err := s.client.Do(req, commitFiles)
- if err != nil {
- return nil, resp, err
- }
-
- return *commitFiles, resp, err
-}
-
-// IsMerged checks if a pull request has been merged.
-//
-// GitHub API docs: https://developer.github.com/v3/pulls/#get-if-a-pull-request-has-been-merged
-func (s *PullRequestsService) IsMerged(owner string, repo string, number int) (bool, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/pulls/%d/merge", owner, repo, number)
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return false, nil, err
- }
-
- resp, err := s.client.Do(req, nil)
- merged, err := parseBoolResponse(err)
- return merged, resp, err
-}
-
-// PullRequestMergeResult represents the result of merging a pull request.
-type PullRequestMergeResult struct {
- SHA *string `json:"sha,omitempty"`
- Merged *bool `json:"merged,omitempty"`
- Message *string `json:"message,omitempty"`
-}
-
-// PullRequestOptions lets you define how a pull request will be merged.
-type PullRequestOptions struct {
- Squash bool
-}
-
-type pullRequestMergeRequest struct {
- CommitMessage *string `json:"commit_message"`
- Squash *bool `json:"squash,omitempty"`
-}
-
-// Merge a pull request (Merge Buttonâ„¢).
-//
-// GitHub API docs: https://developer.github.com/v3/pulls/#merge-a-pull-request-merge-buttontrade
-func (s *PullRequestsService) Merge(owner string, repo string, number int, commitMessage string, options *PullRequestOptions) (*PullRequestMergeResult, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/pulls/%d/merge", owner, repo, number)
-
- pullRequestBody := &pullRequestMergeRequest{CommitMessage: &commitMessage}
- if options != nil {
- pullRequestBody.Squash = &options.Squash
- }
- req, err := s.client.NewRequest("PUT", u, pullRequestBody)
-
- // TODO: This header will be unnecessary when the API is no longer in preview.
- req.Header.Set("Accept", mediaTypeSquashPreview)
- if err != nil {
- return nil, nil, err
- }
-
- mergeResult := new(PullRequestMergeResult)
- resp, err := s.client.Do(req, mergeResult)
- if err != nil {
- return nil, resp, err
- }
-
- return mergeResult, resp, err
-}
diff --git a/vendor/src/github.com/google/go-github/github/pulls_comments.go b/vendor/src/github.com/google/go-github/github/pulls_comments.go
deleted file mode 100644
index c7af85a..0000000
--- a/vendor/src/github.com/google/go-github/github/pulls_comments.go
+++ /dev/null
@@ -1,156 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "fmt"
- "time"
-)
-
-// PullRequestComment represents a comment left on a pull request.
-type PullRequestComment struct {
- ID *int `json:"id,omitempty"`
- InReplyTo *int `json:"in_reply_to,omitempty"`
- Body *string `json:"body,omitempty"`
- Path *string `json:"path,omitempty"`
- DiffHunk *string `json:"diff_hunk,omitempty"`
- Position *int `json:"position,omitempty"`
- OriginalPosition *int `json:"original_position,omitempty"`
- CommitID *string `json:"commit_id,omitempty"`
- OriginalCommitID *string `json:"original_commit_id,omitempty"`
- User *User `json:"user,omitempty"`
- Reactions *Reactions `json:"reactions,omitempty"`
- CreatedAt *time.Time `json:"created_at,omitempty"`
- UpdatedAt *time.Time `json:"updated_at,omitempty"`
- URL *string `json:"url,omitempty"`
- HTMLURL *string `json:"html_url,omitempty"`
- PullRequestURL *string `json:"pull_request_url,omitempty"`
-}
-
-func (p PullRequestComment) String() string {
- return Stringify(p)
-}
-
-// PullRequestListCommentsOptions specifies the optional parameters to the
-// PullRequestsService.ListComments method.
-type PullRequestListCommentsOptions struct {
- // Sort specifies how to sort comments. Possible values are: created, updated.
- Sort string `url:"sort,omitempty"`
-
- // Direction in which to sort comments. Possible values are: asc, desc.
- Direction string `url:"direction,omitempty"`
-
- // Since filters comments by time.
- Since time.Time `url:"since,omitempty"`
-
- ListOptions
-}
-
-// ListComments lists all comments on the specified pull request. Specifying a
-// pull request number of 0 will return all comments on all pull requests for
-// the repository.
-//
-// GitHub API docs: https://developer.github.com/v3/pulls/comments/#list-comments-on-a-pull-request
-func (s *PullRequestsService) ListComments(owner string, repo string, number int, opt *PullRequestListCommentsOptions) ([]*PullRequestComment, *Response, error) {
- var u string
- if number == 0 {
- u = fmt.Sprintf("repos/%v/%v/pulls/comments", owner, repo)
- } else {
- u = fmt.Sprintf("repos/%v/%v/pulls/%d/comments", owner, repo, number)
- }
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeReactionsPreview)
-
- comments := new([]*PullRequestComment)
- resp, err := s.client.Do(req, comments)
- if err != nil {
- return nil, resp, err
- }
-
- return *comments, resp, err
-}
-
-// GetComment fetches the specified pull request comment.
-//
-// GitHub API docs: https://developer.github.com/v3/pulls/comments/#get-a-single-comment
-func (s *PullRequestsService) GetComment(owner string, repo string, number int) (*PullRequestComment, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/pulls/comments/%d", owner, repo, number)
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeReactionsPreview)
-
- comment := new(PullRequestComment)
- resp, err := s.client.Do(req, comment)
- if err != nil {
- return nil, resp, err
- }
-
- return comment, resp, err
-}
-
-// CreateComment creates a new comment on the specified pull request.
-//
-// GitHub API docs: https://developer.github.com/v3/pulls/comments/#create-a-comment
-func (s *PullRequestsService) CreateComment(owner string, repo string, number int, comment *PullRequestComment) (*PullRequestComment, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/pulls/%d/comments", owner, repo, number)
- req, err := s.client.NewRequest("POST", u, comment)
- if err != nil {
- return nil, nil, err
- }
-
- c := new(PullRequestComment)
- resp, err := s.client.Do(req, c)
- if err != nil {
- return nil, resp, err
- }
-
- return c, resp, err
-}
-
-// EditComment updates a pull request comment.
-//
-// GitHub API docs: https://developer.github.com/v3/pulls/comments/#edit-a-comment
-func (s *PullRequestsService) EditComment(owner string, repo string, number int, comment *PullRequestComment) (*PullRequestComment, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/pulls/comments/%d", owner, repo, number)
- req, err := s.client.NewRequest("PATCH", u, comment)
- if err != nil {
- return nil, nil, err
- }
-
- c := new(PullRequestComment)
- resp, err := s.client.Do(req, c)
- if err != nil {
- return nil, resp, err
- }
-
- return c, resp, err
-}
-
-// DeleteComment deletes a pull request comment.
-//
-// GitHub API docs: https://developer.github.com/v3/pulls/comments/#delete-a-comment
-func (s *PullRequestsService) DeleteComment(owner string, repo string, number int) (*Response, error) {
- u := fmt.Sprintf("repos/%v/%v/pulls/comments/%d", owner, repo, number)
- req, err := s.client.NewRequest("DELETE", u, nil)
- if err != nil {
- return nil, err
- }
- return s.client.Do(req, nil)
-}
diff --git a/vendor/src/github.com/google/go-github/github/pulls_comments_test.go b/vendor/src/github.com/google/go-github/github/pulls_comments_test.go
deleted file mode 100644
index 5412ac8..0000000
--- a/vendor/src/github.com/google/go-github/github/pulls_comments_test.go
+++ /dev/null
@@ -1,187 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "encoding/json"
- "fmt"
- "net/http"
- "reflect"
- "testing"
- "time"
-)
-
-func TestPullRequestsService_ListComments_allPulls(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/pulls/comments", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testHeader(t, r, "Accept", mediaTypeReactionsPreview)
- testFormValues(t, r, values{
- "sort": "updated",
- "direction": "desc",
- "since": "2002-02-10T15:30:00Z",
- "page": "2",
- })
- fmt.Fprint(w, `[{"id":1}]`)
- })
-
- opt := &PullRequestListCommentsOptions{
- Sort: "updated",
- Direction: "desc",
- Since: time.Date(2002, time.February, 10, 15, 30, 0, 0, time.UTC),
- ListOptions: ListOptions{Page: 2},
- }
- pulls, _, err := client.PullRequests.ListComments("o", "r", 0, opt)
- if err != nil {
- t.Errorf("PullRequests.ListComments returned error: %v", err)
- }
-
- want := []*PullRequestComment{{ID: Int(1)}}
- if !reflect.DeepEqual(pulls, want) {
- t.Errorf("PullRequests.ListComments returned %+v, want %+v", pulls, want)
- }
-}
-
-func TestPullRequestsService_ListComments_specificPull(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/pulls/1/comments", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testHeader(t, r, "Accept", mediaTypeReactionsPreview)
- fmt.Fprint(w, `[{"id":1}]`)
- })
-
- pulls, _, err := client.PullRequests.ListComments("o", "r", 1, nil)
- if err != nil {
- t.Errorf("PullRequests.ListComments returned error: %v", err)
- }
-
- want := []*PullRequestComment{{ID: Int(1)}}
- if !reflect.DeepEqual(pulls, want) {
- t.Errorf("PullRequests.ListComments returned %+v, want %+v", pulls, want)
- }
-}
-
-func TestPullRequestsService_ListComments_invalidOwner(t *testing.T) {
- _, _, err := client.PullRequests.ListComments("%", "r", 1, nil)
- testURLParseError(t, err)
-}
-
-func TestPullRequestsService_GetComment(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/pulls/comments/1", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testHeader(t, r, "Accept", mediaTypeReactionsPreview)
- fmt.Fprint(w, `{"id":1}`)
- })
-
- comment, _, err := client.PullRequests.GetComment("o", "r", 1)
- if err != nil {
- t.Errorf("PullRequests.GetComment returned error: %v", err)
- }
-
- want := &PullRequestComment{ID: Int(1)}
- if !reflect.DeepEqual(comment, want) {
- t.Errorf("PullRequests.GetComment returned %+v, want %+v", comment, want)
- }
-}
-
-func TestPullRequestsService_GetComment_invalidOwner(t *testing.T) {
- _, _, err := client.PullRequests.GetComment("%", "r", 1)
- testURLParseError(t, err)
-}
-
-func TestPullRequestsService_CreateComment(t *testing.T) {
- setup()
- defer teardown()
-
- input := &PullRequestComment{Body: String("b")}
-
- mux.HandleFunc("/repos/o/r/pulls/1/comments", func(w http.ResponseWriter, r *http.Request) {
- v := new(PullRequestComment)
- json.NewDecoder(r.Body).Decode(v)
-
- testMethod(t, r, "POST")
- if !reflect.DeepEqual(v, input) {
- t.Errorf("Request body = %+v, want %+v", v, input)
- }
-
- fmt.Fprint(w, `{"id":1}`)
- })
-
- comment, _, err := client.PullRequests.CreateComment("o", "r", 1, input)
- if err != nil {
- t.Errorf("PullRequests.CreateComment returned error: %v", err)
- }
-
- want := &PullRequestComment{ID: Int(1)}
- if !reflect.DeepEqual(comment, want) {
- t.Errorf("PullRequests.CreateComment returned %+v, want %+v", comment, want)
- }
-}
-
-func TestPullRequestsService_CreateComment_invalidOwner(t *testing.T) {
- _, _, err := client.PullRequests.CreateComment("%", "r", 1, nil)
- testURLParseError(t, err)
-}
-
-func TestPullRequestsService_EditComment(t *testing.T) {
- setup()
- defer teardown()
-
- input := &PullRequestComment{Body: String("b")}
-
- mux.HandleFunc("/repos/o/r/pulls/comments/1", func(w http.ResponseWriter, r *http.Request) {
- v := new(PullRequestComment)
- json.NewDecoder(r.Body).Decode(v)
-
- testMethod(t, r, "PATCH")
- if !reflect.DeepEqual(v, input) {
- t.Errorf("Request body = %+v, want %+v", v, input)
- }
-
- fmt.Fprint(w, `{"id":1}`)
- })
-
- comment, _, err := client.PullRequests.EditComment("o", "r", 1, input)
- if err != nil {
- t.Errorf("PullRequests.EditComment returned error: %v", err)
- }
-
- want := &PullRequestComment{ID: Int(1)}
- if !reflect.DeepEqual(comment, want) {
- t.Errorf("PullRequests.EditComment returned %+v, want %+v", comment, want)
- }
-}
-
-func TestPullRequestsService_EditComment_invalidOwner(t *testing.T) {
- _, _, err := client.PullRequests.EditComment("%", "r", 1, nil)
- testURLParseError(t, err)
-}
-
-func TestPullRequestsService_DeleteComment(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/pulls/comments/1", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "DELETE")
- })
-
- _, err := client.PullRequests.DeleteComment("o", "r", 1)
- if err != nil {
- t.Errorf("PullRequests.DeleteComment returned error: %v", err)
- }
-}
-
-func TestPullRequestsService_DeleteComment_invalidOwner(t *testing.T) {
- _, err := client.PullRequests.DeleteComment("%", "r", 1)
- testURLParseError(t, err)
-}
diff --git a/vendor/src/github.com/google/go-github/github/pulls_test.go b/vendor/src/github.com/google/go-github/github/pulls_test.go
deleted file mode 100644
index 1f8c58c..0000000
--- a/vendor/src/github.com/google/go-github/github/pulls_test.go
+++ /dev/null
@@ -1,363 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "encoding/json"
- "fmt"
- "net/http"
- "reflect"
- "testing"
-)
-
-func TestPullRequestsService_List(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/pulls", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testFormValues(t, r, values{
- "state": "closed",
- "head": "h",
- "base": "b",
- "sort": "created",
- "direction": "desc",
- "page": "2",
- })
- fmt.Fprint(w, `[{"number":1}]`)
- })
-
- opt := &PullRequestListOptions{"closed", "h", "b", "created", "desc", ListOptions{Page: 2}}
- pulls, _, err := client.PullRequests.List("o", "r", opt)
- if err != nil {
- t.Errorf("PullRequests.List returned error: %v", err)
- }
-
- want := []*PullRequest{{Number: Int(1)}}
- if !reflect.DeepEqual(pulls, want) {
- t.Errorf("PullRequests.List returned %+v, want %+v", pulls, want)
- }
-}
-
-func TestPullRequestsService_List_invalidOwner(t *testing.T) {
- _, _, err := client.PullRequests.List("%", "r", nil)
- testURLParseError(t, err)
-}
-
-func TestPullRequestsService_Get(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/pulls/1", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- fmt.Fprint(w, `{"number":1}`)
- })
-
- pull, _, err := client.PullRequests.Get("o", "r", 1)
- if err != nil {
- t.Errorf("PullRequests.Get returned error: %v", err)
- }
-
- want := &PullRequest{Number: Int(1)}
- if !reflect.DeepEqual(pull, want) {
- t.Errorf("PullRequests.Get returned %+v, want %+v", pull, want)
- }
-}
-
-func TestPullRequestsService_Get_headAndBase(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/pulls/1", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- fmt.Fprint(w, `{"number":1,"head":{"ref":"r2","repo":{"id":2}},"base":{"ref":"r1","repo":{"id":1}}}`)
- })
-
- pull, _, err := client.PullRequests.Get("o", "r", 1)
- if err != nil {
- t.Errorf("PullRequests.Get returned error: %v", err)
- }
-
- want := &PullRequest{
- Number: Int(1),
- Head: &PullRequestBranch{
- Ref: String("r2"),
- Repo: &Repository{ID: Int(2)},
- },
- Base: &PullRequestBranch{
- Ref: String("r1"),
- Repo: &Repository{ID: Int(1)},
- },
- }
- if !reflect.DeepEqual(pull, want) {
- t.Errorf("PullRequests.Get returned %+v, want %+v", pull, want)
- }
-}
-
-func TestPullRequestService_Get_DiffURLAndPatchURL(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/pulls/1", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- fmt.Fprint(w, `{"number":1,
- "diff_url": "https://github.com/octocat/Hello-World/pull/1347.diff",
- "patch_url": "https://github.com/octocat/Hello-World/pull/1347.patch"}`)
- })
-
- pull, _, err := client.PullRequests.Get("o", "r", 1)
- if err != nil {
- t.Errorf("PullRequests.Get returned error: %v", err)
- }
-
- want := &PullRequest{Number: Int(1), DiffURL: String("https://github.com/octocat/Hello-World/pull/1347.diff"), PatchURL: String("https://github.com/octocat/Hello-World/pull/1347.patch")}
- if !reflect.DeepEqual(pull, want) {
- t.Errorf("PullRequests.Get returned %+v, want %+v", pull, want)
- }
-}
-
-func TestPullRequestsService_Get_invalidOwner(t *testing.T) {
- _, _, err := client.PullRequests.Get("%", "r", 1)
- testURLParseError(t, err)
-}
-
-func TestPullRequestsService_Create(t *testing.T) {
- setup()
- defer teardown()
-
- input := &NewPullRequest{Title: String("t")}
-
- mux.HandleFunc("/repos/o/r/pulls", func(w http.ResponseWriter, r *http.Request) {
- v := new(NewPullRequest)
- json.NewDecoder(r.Body).Decode(v)
-
- testMethod(t, r, "POST")
- if !reflect.DeepEqual(v, input) {
- t.Errorf("Request body = %+v, want %+v", v, input)
- }
-
- fmt.Fprint(w, `{"number":1}`)
- })
-
- pull, _, err := client.PullRequests.Create("o", "r", input)
- if err != nil {
- t.Errorf("PullRequests.Create returned error: %v", err)
- }
-
- want := &PullRequest{Number: Int(1)}
- if !reflect.DeepEqual(pull, want) {
- t.Errorf("PullRequests.Create returned %+v, want %+v", pull, want)
- }
-}
-
-func TestPullRequestsService_Create_invalidOwner(t *testing.T) {
- _, _, err := client.PullRequests.Create("%", "r", nil)
- testURLParseError(t, err)
-}
-
-func TestPullRequestsService_Edit(t *testing.T) {
- setup()
- defer teardown()
-
- input := &PullRequest{Title: String("t")}
-
- mux.HandleFunc("/repos/o/r/pulls/1", func(w http.ResponseWriter, r *http.Request) {
- v := new(PullRequest)
- json.NewDecoder(r.Body).Decode(v)
-
- testMethod(t, r, "PATCH")
- if !reflect.DeepEqual(v, input) {
- t.Errorf("Request body = %+v, want %+v", v, input)
- }
-
- fmt.Fprint(w, `{"number":1}`)
- })
-
- pull, _, err := client.PullRequests.Edit("o", "r", 1, input)
- if err != nil {
- t.Errorf("PullRequests.Edit returned error: %v", err)
- }
-
- want := &PullRequest{Number: Int(1)}
- if !reflect.DeepEqual(pull, want) {
- t.Errorf("PullRequests.Edit returned %+v, want %+v", pull, want)
- }
-}
-
-func TestPullRequestsService_Edit_invalidOwner(t *testing.T) {
- _, _, err := client.PullRequests.Edit("%", "r", 1, nil)
- testURLParseError(t, err)
-}
-
-func TestPullRequestsService_ListCommits(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/pulls/1/commits", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testFormValues(t, r, values{"page": "2"})
- fmt.Fprint(w, `
- [
- {
- "sha": "3",
- "parents": [
- {
- "sha": "2"
- }
- ]
- },
- {
- "sha": "2",
- "parents": [
- {
- "sha": "1"
- }
- ]
- }
- ]`)
- })
-
- opt := &ListOptions{Page: 2}
- commits, _, err := client.PullRequests.ListCommits("o", "r", 1, opt)
- if err != nil {
- t.Errorf("PullRequests.ListCommits returned error: %v", err)
- }
-
- want := []*RepositoryCommit{
- {
- SHA: String("3"),
- Parents: []Commit{
- {
- SHA: String("2"),
- },
- },
- },
- {
- SHA: String("2"),
- Parents: []Commit{
- {
- SHA: String("1"),
- },
- },
- },
- }
- if !reflect.DeepEqual(commits, want) {
- t.Errorf("PullRequests.ListCommits returned %+v, want %+v", commits, want)
- }
-}
-
-func TestPullRequestsService_ListFiles(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/pulls/1/files", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testFormValues(t, r, values{"page": "2"})
- fmt.Fprint(w, `
- [
- {
- "sha": "6dcb09b5b57875f334f61aebed695e2e4193db5e",
- "filename": "file1.txt",
- "status": "added",
- "additions": 103,
- "deletions": 21,
- "changes": 124,
- "patch": "@@ -132,7 +132,7 @@ module Test @@ -1000,7 +1000,7 @@ module Test"
- },
- {
- "sha": "f61aebed695e2e4193db5e6dcb09b5b57875f334",
- "filename": "file2.txt",
- "status": "modified",
- "additions": 5,
- "deletions": 3,
- "changes": 103,
- "patch": "@@ -132,7 +132,7 @@ module Test @@ -1000,7 +1000,7 @@ module Test"
- }
- ]`)
- })
-
- opt := &ListOptions{Page: 2}
- commitFiles, _, err := client.PullRequests.ListFiles("o", "r", 1, opt)
- if err != nil {
- t.Errorf("PullRequests.ListFiles returned error: %v", err)
- }
-
- want := []*CommitFile{
- {
- SHA: String("6dcb09b5b57875f334f61aebed695e2e4193db5e"),
- Filename: String("file1.txt"),
- Additions: Int(103),
- Deletions: Int(21),
- Changes: Int(124),
- Status: String("added"),
- Patch: String("@@ -132,7 +132,7 @@ module Test @@ -1000,7 +1000,7 @@ module Test"),
- },
- {
- SHA: String("f61aebed695e2e4193db5e6dcb09b5b57875f334"),
- Filename: String("file2.txt"),
- Additions: Int(5),
- Deletions: Int(3),
- Changes: Int(103),
- Status: String("modified"),
- Patch: String("@@ -132,7 +132,7 @@ module Test @@ -1000,7 +1000,7 @@ module Test"),
- },
- }
-
- if !reflect.DeepEqual(commitFiles, want) {
- t.Errorf("PullRequests.ListFiles returned %+v, want %+v", commitFiles, want)
- }
-}
-
-func TestPullRequestsService_IsMerged(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/pulls/1/merge", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- w.WriteHeader(http.StatusNoContent)
- })
-
- isMerged, _, err := client.PullRequests.IsMerged("o", "r", 1)
- if err != nil {
- t.Errorf("PullRequests.IsMerged returned error: %v", err)
- }
-
- want := true
- if !reflect.DeepEqual(isMerged, want) {
- t.Errorf("PullRequests.IsMerged returned %+v, want %+v", isMerged, want)
- }
-}
-
-func TestPullRequestsService_Merge(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/pulls/1/merge", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "PUT")
- testHeader(t, r, "Accept", mediaTypeSquashPreview)
- fmt.Fprint(w, `
- {
- "sha": "6dcb09b5b57875f334f61aebed695e2e4193db5e",
- "merged": true,
- "message": "Pull Request successfully merged"
- }`)
- })
-
- options := &PullRequestOptions{Squash: true}
- merge, _, err := client.PullRequests.Merge("o", "r", 1, "merging pull request", options)
- if err != nil {
- t.Errorf("PullRequests.Merge returned error: %v", err)
- }
-
- want := &PullRequestMergeResult{
- SHA: String("6dcb09b5b57875f334f61aebed695e2e4193db5e"),
- Merged: Bool(true),
- Message: String("Pull Request successfully merged"),
- }
- if !reflect.DeepEqual(merge, want) {
- t.Errorf("PullRequests.Merge returned %+v, want %+v", merge, want)
- }
-}
diff --git a/vendor/src/github.com/google/go-github/github/reactions.go b/vendor/src/github.com/google/go-github/github/reactions.go
deleted file mode 100644
index 03b131b..0000000
--- a/vendor/src/github.com/google/go-github/github/reactions.go
+++ /dev/null
@@ -1,270 +0,0 @@
-// Copyright 2016 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import "fmt"
-
-// ReactionsService provides access to the reactions-related functions in the
-// GitHub API.
-//
-// GitHub API docs: https://developer.github.com/v3/reactions/
-type ReactionsService service
-
-// Reaction represents a GitHub reaction.
-type Reaction struct {
- // ID is the Reaction ID.
- ID *int `json:"id,omitempty"`
- User *User `json:"user,omitempty"`
- // Content is the type of reaction.
- // Possible values are:
- // "+1", "-1", "laugh", "confused", "heart", "hooray".
- Content *string `json:"content,omitempty"`
-}
-
-// Reactions represents a summary of GitHub reactions.
-type Reactions struct {
- TotalCount *int `json:"total_count,omitempty"`
- PlusOne *int `json:"+1,omitempty"`
- MinusOne *int `json:"-1,omitempty"`
- Laugh *int `json:"laugh,omitempty"`
- Confused *int `json:"confused,omitempty"`
- Heart *int `json:"heart,omitempty"`
- Hooray *int `json:"hooray,omitempty"`
- URL *string `json:"url,omitempty"`
-}
-
-func (r Reaction) String() string {
- return Stringify(r)
-}
-
-// ListCommentReactions lists the reactions for a commit comment.
-//
-// GitHub API docs: https://developer.github.com/v3/reactions/#list-reactions-for-a-commit-comment
-func (s *ReactionsService) ListCommentReactions(owner, repo string, id int, opt *ListOptions) ([]*Reaction, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/comments/%v/reactions", owner, repo, id)
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeReactionsPreview)
-
- var m []*Reaction
- resp, err := s.client.Do(req, &m)
- if err != nil {
- return nil, resp, err
- }
-
- return m, resp, nil
-}
-
-// CreateCommentReaction creates a reaction for a commit comment.
-// Note that if you have already created a reaction of type content, the
-// previously created reaction will be returned with Status: 200 OK.
-//
-// GitHub API docs: https://developer.github.com/v3/reactions/#create-reaction-for-a-commit-comment
-func (s ReactionsService) CreateCommentReaction(owner, repo string, id int, content string) (*Reaction, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/comments/%v/reactions", owner, repo, id)
-
- body := &Reaction{Content: String(content)}
- req, err := s.client.NewRequest("POST", u, body)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeReactionsPreview)
-
- m := &Reaction{}
- resp, err := s.client.Do(req, m)
- if err != nil {
- return nil, resp, err
- }
-
- return m, resp, nil
-}
-
-// ListIssueReactions lists the reactions for an issue.
-//
-// GitHub API docs: https://developer.github.com/v3/reactions/#list-reactions-for-an-issue
-func (s *ReactionsService) ListIssueReactions(owner, repo string, number int, opt *ListOptions) ([]*Reaction, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/issues/%v/reactions", owner, repo, number)
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeReactionsPreview)
-
- var m []*Reaction
- resp, err := s.client.Do(req, &m)
- if err != nil {
- return nil, resp, err
- }
-
- return m, resp, nil
-}
-
-// CreateIssueReaction creates a reaction for an issue.
-// Note that if you have already created a reaction of type content, the
-// previously created reaction will be returned with Status: 200 OK.
-//
-// GitHub API docs: https://developer.github.com/v3/reactions/#create-reaction-for-an-issue
-func (s ReactionsService) CreateIssueReaction(owner, repo string, number int, content string) (*Reaction, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/issues/%v/reactions", owner, repo, number)
-
- body := &Reaction{Content: String(content)}
- req, err := s.client.NewRequest("POST", u, body)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeReactionsPreview)
-
- m := &Reaction{}
- resp, err := s.client.Do(req, m)
- if err != nil {
- return nil, resp, err
- }
-
- return m, resp, nil
-}
-
-// ListIssueCommentReactions lists the reactions for an issue comment.
-//
-// GitHub API docs: https://developer.github.com/v3/reactions/#list-reactions-for-an-issue-comment
-func (s *ReactionsService) ListIssueCommentReactions(owner, repo string, id int, opt *ListOptions) ([]*Reaction, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/issues/comments/%v/reactions", owner, repo, id)
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeReactionsPreview)
-
- var m []*Reaction
- resp, err := s.client.Do(req, &m)
- if err != nil {
- return nil, resp, err
- }
-
- return m, resp, nil
-}
-
-// CreateIssueCommentReaction creates a reaction for an issue comment.
-// Note that if you have already created a reaction of type content, the
-// previously created reaction will be returned with Status: 200 OK.
-//
-// GitHub API docs: https://developer.github.com/v3/reactions/#create-reaction-for-an-issue-comment
-func (s ReactionsService) CreateIssueCommentReaction(owner, repo string, id int, content string) (*Reaction, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/issues/comments/%v/reactions", owner, repo, id)
-
- body := &Reaction{Content: String(content)}
- req, err := s.client.NewRequest("POST", u, body)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeReactionsPreview)
-
- m := &Reaction{}
- resp, err := s.client.Do(req, m)
- if err != nil {
- return nil, resp, err
- }
-
- return m, resp, nil
-}
-
-// ListPullRequestCommentReactions lists the reactions for a pull request review comment.
-//
-// GitHub API docs: https://developer.github.com/v3/reactions/#list-reactions-for-an-issue-comment
-func (s *ReactionsService) ListPullRequestCommentReactions(owner, repo string, id int, opt *ListOptions) ([]*Reaction, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/pulls/comments/%v/reactions", owner, repo, id)
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeReactionsPreview)
-
- var m []*Reaction
- resp, err := s.client.Do(req, &m)
- if err != nil {
- return nil, resp, err
- }
-
- return m, resp, nil
-}
-
-// CreatePullRequestCommentReaction creates a reaction for a pull request review comment.
-// Note that if you have already created a reaction of type content, the
-// previously created reaction will be returned with Status: 200 OK.
-//
-// GitHub API docs: https://developer.github.com/v3/reactions/#create-reaction-for-an-issue-comment
-func (s ReactionsService) CreatePullRequestCommentReaction(owner, repo string, id int, content string) (*Reaction, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/pulls/comments/%v/reactions", owner, repo, id)
-
- body := &Reaction{Content: String(content)}
- req, err := s.client.NewRequest("POST", u, body)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeReactionsPreview)
-
- m := &Reaction{}
- resp, err := s.client.Do(req, m)
- if err != nil {
- return nil, resp, err
- }
-
- return m, resp, nil
-}
-
-// DeleteReaction deletes a reaction.
-//
-// GitHub API docs: https://developer.github.com/v3/reaction/reactions/#delete-a-reaction-archive
-func (s *ReactionsService) DeleteReaction(id int) (*Response, error) {
- u := fmt.Sprintf("reactions/%v", id)
-
- req, err := s.client.NewRequest("DELETE", u, nil)
- if err != nil {
- return nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeReactionsPreview)
-
- return s.client.Do(req, nil)
-}
diff --git a/vendor/src/github.com/google/go-github/github/reactions_test.go b/vendor/src/github.com/google/go-github/github/reactions_test.go
deleted file mode 100644
index 91ef9b5..0000000
--- a/vendor/src/github.com/google/go-github/github/reactions_test.go
+++ /dev/null
@@ -1,200 +0,0 @@
-// Copyright 2016 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "net/http"
- "reflect"
- "testing"
-)
-
-func TestReactionsService_ListCommentReactions(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/comments/1/reactions", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testHeader(t, r, "Accept", mediaTypeReactionsPreview)
-
- w.WriteHeader(http.StatusOK)
- w.Write([]byte(`[{"id":1,"user":{"login":"l","id":2},"content":"+1"}]`))
- })
-
- got, _, err := client.Reactions.ListCommentReactions("o", "r", 1, nil)
- if err != nil {
- t.Errorf("ListCommentReactions returned error: %v", err)
- }
- if want := []*Reaction{{ID: Int(1), User: &User{Login: String("l"), ID: Int(2)}, Content: String("+1")}}; !reflect.DeepEqual(got, want) {
- t.Errorf("ListCommentReactions = %+v, want %+v", got, want)
- }
-}
-
-func TestReactionsService_CreateCommentReaction(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/comments/1/reactions", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "POST")
- testHeader(t, r, "Accept", mediaTypeReactionsPreview)
-
- w.WriteHeader(http.StatusCreated)
- w.Write([]byte(`{"id":1,"user":{"login":"l","id":2},"content":"+1"}`))
- })
-
- got, _, err := client.Reactions.CreateCommentReaction("o", "r", 1, "+1")
- if err != nil {
- t.Errorf("CreateCommentReaction returned error: %v", err)
- }
- want := &Reaction{ID: Int(1), User: &User{Login: String("l"), ID: Int(2)}, Content: String("+1")}
- if !reflect.DeepEqual(got, want) {
- t.Errorf("CreateCommentReaction = %+v, want %+v", got, want)
- }
-}
-
-func TestReactionsService_ListIssueReactions(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/issues/1/reactions", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testHeader(t, r, "Accept", mediaTypeReactionsPreview)
-
- w.WriteHeader(http.StatusOK)
- w.Write([]byte(`[{"id":1,"user":{"login":"l","id":2},"content":"+1"}]`))
- })
-
- got, _, err := client.Reactions.ListIssueReactions("o", "r", 1, nil)
- if err != nil {
- t.Errorf("ListIssueReactions returned error: %v", err)
- }
- if want := []*Reaction{{ID: Int(1), User: &User{Login: String("l"), ID: Int(2)}, Content: String("+1")}}; !reflect.DeepEqual(got, want) {
- t.Errorf("ListIssueReactions = %+v, want %+v", got, want)
- }
-}
-
-func TestReactionsService_CreateIssueReaction(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/issues/1/reactions", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "POST")
- testHeader(t, r, "Accept", mediaTypeReactionsPreview)
-
- w.WriteHeader(http.StatusCreated)
- w.Write([]byte(`{"id":1,"user":{"login":"l","id":2},"content":"+1"}`))
- })
-
- got, _, err := client.Reactions.CreateIssueReaction("o", "r", 1, "+1")
- if err != nil {
- t.Errorf("CreateIssueReaction returned error: %v", err)
- }
- want := &Reaction{ID: Int(1), User: &User{Login: String("l"), ID: Int(2)}, Content: String("+1")}
- if !reflect.DeepEqual(got, want) {
- t.Errorf("CreateIssueReaction = %+v, want %+v", got, want)
- }
-}
-
-func TestReactionsService_ListIssueCommentReactions(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/issues/comments/1/reactions", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testHeader(t, r, "Accept", mediaTypeReactionsPreview)
-
- w.WriteHeader(http.StatusOK)
- w.Write([]byte(`[{"id":1,"user":{"login":"l","id":2},"content":"+1"}]`))
- })
-
- got, _, err := client.Reactions.ListIssueCommentReactions("o", "r", 1, nil)
- if err != nil {
- t.Errorf("ListIssueCommentReactions returned error: %v", err)
- }
- if want := []*Reaction{{ID: Int(1), User: &User{Login: String("l"), ID: Int(2)}, Content: String("+1")}}; !reflect.DeepEqual(got, want) {
- t.Errorf("ListIssueCommentReactions = %+v, want %+v", got, want)
- }
-}
-
-func TestReactionsService_CreateIssueCommentReaction(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/issues/comments/1/reactions", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "POST")
- testHeader(t, r, "Accept", mediaTypeReactionsPreview)
-
- w.WriteHeader(http.StatusCreated)
- w.Write([]byte(`{"id":1,"user":{"login":"l","id":2},"content":"+1"}`))
- })
-
- got, _, err := client.Reactions.CreateIssueCommentReaction("o", "r", 1, "+1")
- if err != nil {
- t.Errorf("CreateIssueCommentReaction returned error: %v", err)
- }
- want := &Reaction{ID: Int(1), User: &User{Login: String("l"), ID: Int(2)}, Content: String("+1")}
- if !reflect.DeepEqual(got, want) {
- t.Errorf("CreateIssueCommentReaction = %+v, want %+v", got, want)
- }
-}
-
-func TestReactionsService_ListPullRequestCommentReactions(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/pulls/comments/1/reactions", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testHeader(t, r, "Accept", mediaTypeReactionsPreview)
-
- w.WriteHeader(http.StatusOK)
- w.Write([]byte(`[{"id":1,"user":{"login":"l","id":2},"content":"+1"}]`))
- })
-
- got, _, err := client.Reactions.ListPullRequestCommentReactions("o", "r", 1, nil)
- if err != nil {
- t.Errorf("ListPullRequestCommentReactions returned error: %v", err)
- }
- if want := []*Reaction{{ID: Int(1), User: &User{Login: String("l"), ID: Int(2)}, Content: String("+1")}}; !reflect.DeepEqual(got, want) {
- t.Errorf("ListPullRequestCommentReactions = %+v, want %+v", got, want)
- }
-}
-
-func TestReactionsService_CreatePullRequestCommentReaction(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/pulls/comments/1/reactions", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "POST")
- testHeader(t, r, "Accept", mediaTypeReactionsPreview)
-
- w.WriteHeader(http.StatusCreated)
- w.Write([]byte(`{"id":1,"user":{"login":"l","id":2},"content":"+1"}`))
- })
-
- got, _, err := client.Reactions.CreatePullRequestCommentReaction("o", "r", 1, "+1")
- if err != nil {
- t.Errorf("CreatePullRequestCommentReaction returned error: %v", err)
- }
- want := &Reaction{ID: Int(1), User: &User{Login: String("l"), ID: Int(2)}, Content: String("+1")}
- if !reflect.DeepEqual(got, want) {
- t.Errorf("CreatePullRequestCommentReaction = %+v, want %+v", got, want)
- }
-}
-
-func TestReactionsService_DeleteReaction(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/reactions/1", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "DELETE")
- testHeader(t, r, "Accept", mediaTypeReactionsPreview)
-
- w.WriteHeader(http.StatusNoContent)
- })
-
- if _, err := client.Reactions.DeleteReaction(1); err != nil {
- t.Errorf("DeleteReaction returned error: %v", err)
- }
-}
diff --git a/vendor/src/github.com/google/go-github/github/repos.go b/vendor/src/github.com/google/go-github/github/repos.go
deleted file mode 100644
index fb402ee..0000000
--- a/vendor/src/github.com/google/go-github/github/repos.go
+++ /dev/null
@@ -1,597 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import "fmt"
-
-// RepositoriesService handles communication with the repository related
-// methods of the GitHub API.
-//
-// GitHub API docs: http://developer.github.com/v3/repos/
-type RepositoriesService service
-
-// Repository represents a GitHub repository.
-type Repository struct {
- ID *int `json:"id,omitempty"`
- Owner *User `json:"owner,omitempty"`
- Name *string `json:"name,omitempty"`
- FullName *string `json:"full_name,omitempty"`
- Description *string `json:"description,omitempty"`
- Homepage *string `json:"homepage,omitempty"`
- DefaultBranch *string `json:"default_branch,omitempty"`
- MasterBranch *string `json:"master_branch,omitempty"`
- CreatedAt *Timestamp `json:"created_at,omitempty"`
- PushedAt *Timestamp `json:"pushed_at,omitempty"`
- UpdatedAt *Timestamp `json:"updated_at,omitempty"`
- HTMLURL *string `json:"html_url,omitempty"`
- CloneURL *string `json:"clone_url,omitempty"`
- GitURL *string `json:"git_url,omitempty"`
- MirrorURL *string `json:"mirror_url,omitempty"`
- SSHURL *string `json:"ssh_url,omitempty"`
- SVNURL *string `json:"svn_url,omitempty"`
- Language *string `json:"language,omitempty"`
- Fork *bool `json:"fork"`
- ForksCount *int `json:"forks_count,omitempty"`
- NetworkCount *int `json:"network_count,omitempty"`
- OpenIssuesCount *int `json:"open_issues_count,omitempty"`
- StargazersCount *int `json:"stargazers_count,omitempty"`
- SubscribersCount *int `json:"subscribers_count,omitempty"`
- WatchersCount *int `json:"watchers_count,omitempty"`
- Size *int `json:"size,omitempty"`
- AutoInit *bool `json:"auto_init,omitempty"`
- Parent *Repository `json:"parent,omitempty"`
- Source *Repository `json:"source,omitempty"`
- Organization *Organization `json:"organization,omitempty"`
- Permissions *map[string]bool `json:"permissions,omitempty"`
-
- // Only provided when using RepositoriesService.Get while in preview
- License *License `json:"license,omitempty"`
-
- // Additional mutable fields when creating and editing a repository
- Private *bool `json:"private"`
- HasIssues *bool `json:"has_issues"`
- HasWiki *bool `json:"has_wiki"`
- HasDownloads *bool `json:"has_downloads"`
- // Creating an organization repository. Required for non-owners.
- TeamID *int `json:"team_id"`
-
- // API URLs
- URL *string `json:"url,omitempty"`
- ArchiveURL *string `json:"archive_url,omitempty"`
- AssigneesURL *string `json:"assignees_url,omitempty"`
- BlobsURL *string `json:"blobs_url,omitempty"`
- BranchesURL *string `json:"branches_url,omitempty"`
- CollaboratorsURL *string `json:"collaborators_url,omitempty"`
- CommentsURL *string `json:"comments_url,omitempty"`
- CommitsURL *string `json:"commits_url,omitempty"`
- CompareURL *string `json:"compare_url,omitempty"`
- ContentsURL *string `json:"contents_url,omitempty"`
- ContributorsURL *string `json:"contributors_url,omitempty"`
- DownloadsURL *string `json:"downloads_url,omitempty"`
- EventsURL *string `json:"events_url,omitempty"`
- ForksURL *string `json:"forks_url,omitempty"`
- GitCommitsURL *string `json:"git_commits_url,omitempty"`
- GitRefsURL *string `json:"git_refs_url,omitempty"`
- GitTagsURL *string `json:"git_tags_url,omitempty"`
- HooksURL *string `json:"hooks_url,omitempty"`
- IssueCommentURL *string `json:"issue_comment_url,omitempty"`
- IssueEventsURL *string `json:"issue_events_url,omitempty"`
- IssuesURL *string `json:"issues_url,omitempty"`
- KeysURL *string `json:"keys_url,omitempty"`
- LabelsURL *string `json:"labels_url,omitempty"`
- LanguagesURL *string `json:"languages_url,omitempty"`
- MergesURL *string `json:"merges_url,omitempty"`
- MilestonesURL *string `json:"milestones_url,omitempty"`
- NotificationsURL *string `json:"notifications_url,omitempty"`
- PullsURL *string `json:"pulls_url,omitempty"`
- ReleasesURL *string `json:"releases_url,omitempty"`
- StargazersURL *string `json:"stargazers_url,omitempty"`
- StatusesURL *string `json:"statuses_url,omitempty"`
- SubscribersURL *string `json:"subscribers_url,omitempty"`
- SubscriptionURL *string `json:"subscription_url,omitempty"`
- TagsURL *string `json:"tags_url,omitempty"`
- TreesURL *string `json:"trees_url,omitempty"`
- TeamsURL *string `json:"teams_url,omitempty"`
-
- // TextMatches is only populated from search results that request text matches
- // See: search.go and https://developer.github.com/v3/search/#text-match-metadata
- TextMatches []TextMatch `json:"text_matches,omitempty"`
-}
-
-func (r Repository) String() string {
- return Stringify(r)
-}
-
-// RepositoryListOptions specifies the optional parameters to the
-// RepositoriesService.List method.
-type RepositoryListOptions struct {
- // Visibility of repositories to list. Can be one of all, public, or private.
- // Default: all
- Visibility string `url:"visibility,omitempty"`
-
- // List repos of given affiliation[s].
- // Comma-separated list of values. Can include:
- // * owner: Repositories that are owned by the authenticated user.
- // * collaborator: Repositories that the user has been added to as a
- // collaborator.
- // * organization_member: Repositories that the user has access to through
- // being a member of an organization. This includes every repository on
- // every team that the user is on.
- // Default: owner,collaborator,organization_member
- Affiliation string `url:"affiliation,omitempty"`
-
- // Type of repositories to list.
- // Can be one of all, owner, public, private, member. Default: all
- // Will cause a 422 error if used in the same request as visibility or
- // affiliation.
- Type string `url:"type,omitempty"`
-
- // How to sort the repository list. Can be one of created, updated, pushed,
- // full_name. Default: full_name
- Sort string `url:"sort,omitempty"`
-
- // Direction in which to sort repositories. Can be one of asc or desc.
- // Default: when using full_name: asc; otherwise desc
- Direction string `url:"direction,omitempty"`
-
- ListOptions
-}
-
-// List the repositories for a user. Passing the empty string will list
-// repositories for the authenticated user.
-//
-// GitHub API docs: http://developer.github.com/v3/repos/#list-user-repositories
-func (s *RepositoriesService) List(user string, opt *RepositoryListOptions) ([]*Repository, *Response, error) {
- var u string
- if user != "" {
- u = fmt.Sprintf("users/%v/repos", user)
- } else {
- u = "user/repos"
- }
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when license support fully launches
- req.Header.Set("Accept", mediaTypeLicensesPreview)
-
- repos := new([]*Repository)
- resp, err := s.client.Do(req, repos)
- if err != nil {
- return nil, resp, err
- }
-
- return *repos, resp, err
-}
-
-// RepositoryListByOrgOptions specifies the optional parameters to the
-// RepositoriesService.ListByOrg method.
-type RepositoryListByOrgOptions struct {
- // Type of repositories to list. Possible values are: all, public, private,
- // forks, sources, member. Default is "all".
- Type string `url:"type,omitempty"`
-
- ListOptions
-}
-
-// ListByOrg lists the repositories for an organization.
-//
-// GitHub API docs: http://developer.github.com/v3/repos/#list-organization-repositories
-func (s *RepositoriesService) ListByOrg(org string, opt *RepositoryListByOrgOptions) ([]*Repository, *Response, error) {
- u := fmt.Sprintf("orgs/%v/repos", org)
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when license support fully launches
- req.Header.Set("Accept", mediaTypeLicensesPreview)
-
- repos := new([]*Repository)
- resp, err := s.client.Do(req, repos)
- if err != nil {
- return nil, resp, err
- }
-
- return *repos, resp, err
-}
-
-// RepositoryListAllOptions specifies the optional parameters to the
-// RepositoriesService.ListAll method.
-type RepositoryListAllOptions struct {
- // ID of the last repository seen
- Since int `url:"since,omitempty"`
-
- ListOptions
-}
-
-// ListAll lists all GitHub repositories in the order that they were created.
-//
-// GitHub API docs: http://developer.github.com/v3/repos/#list-all-public-repositories
-func (s *RepositoriesService) ListAll(opt *RepositoryListAllOptions) ([]*Repository, *Response, error) {
- u, err := addOptions("repositories", opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- repos := new([]*Repository)
- resp, err := s.client.Do(req, repos)
- if err != nil {
- return nil, resp, err
- }
-
- return *repos, resp, err
-}
-
-// Create a new repository. If an organization is specified, the new
-// repository will be created under that org. If the empty string is
-// specified, it will be created for the authenticated user.
-//
-// GitHub API docs: http://developer.github.com/v3/repos/#create
-func (s *RepositoriesService) Create(org string, repo *Repository) (*Repository, *Response, error) {
- var u string
- if org != "" {
- u = fmt.Sprintf("orgs/%v/repos", org)
- } else {
- u = "user/repos"
- }
-
- req, err := s.client.NewRequest("POST", u, repo)
- if err != nil {
- return nil, nil, err
- }
-
- r := new(Repository)
- resp, err := s.client.Do(req, r)
- if err != nil {
- return nil, resp, err
- }
-
- return r, resp, err
-}
-
-// Get fetches a repository.
-//
-// GitHub API docs: http://developer.github.com/v3/repos/#get
-func (s *RepositoriesService) Get(owner, repo string) (*Repository, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v", owner, repo)
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when the license support fully launches
- // https://developer.github.com/v3/licenses/#get-a-repositorys-license
- req.Header.Set("Accept", mediaTypeLicensesPreview)
-
- repository := new(Repository)
- resp, err := s.client.Do(req, repository)
- if err != nil {
- return nil, resp, err
- }
-
- return repository, resp, err
-}
-
-// GetByID fetches a repository.
-//
-// Note: GetByID uses the undocumented GitHub API endpoint /repositories/:id.
-func (s *RepositoriesService) GetByID(id int) (*Repository, *Response, error) {
- u := fmt.Sprintf("repositories/%d", id)
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when the license support fully launches
- // https://developer.github.com/v3/licenses/#get-a-repositorys-license
- req.Header.Set("Accept", mediaTypeLicensesPreview)
-
- repository := new(Repository)
- resp, err := s.client.Do(req, repository)
- if err != nil {
- return nil, resp, err
- }
-
- return repository, resp, err
-}
-
-// Edit updates a repository.
-//
-// GitHub API docs: http://developer.github.com/v3/repos/#edit
-func (s *RepositoriesService) Edit(owner, repo string, repository *Repository) (*Repository, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v", owner, repo)
- req, err := s.client.NewRequest("PATCH", u, repository)
- if err != nil {
- return nil, nil, err
- }
-
- r := new(Repository)
- resp, err := s.client.Do(req, r)
- if err != nil {
- return nil, resp, err
- }
-
- return r, resp, err
-}
-
-// Delete a repository.
-//
-// GitHub API docs: https://developer.github.com/v3/repos/#delete-a-repository
-func (s *RepositoriesService) Delete(owner, repo string) (*Response, error) {
- u := fmt.Sprintf("repos/%v/%v", owner, repo)
- req, err := s.client.NewRequest("DELETE", u, nil)
- if err != nil {
- return nil, err
- }
-
- return s.client.Do(req, nil)
-}
-
-// Contributor represents a repository contributor
-type Contributor struct {
- Login *string `json:"login,omitempty"`
- ID *int `json:"id,omitempty"`
- AvatarURL *string `json:"avatar_url,omitempty"`
- GravatarID *string `json:"gravatar_id,omitempty"`
- URL *string `json:"url,omitempty"`
- HTMLURL *string `json:"html_url,omitempty"`
- FollowersURL *string `json:"followers_url,omitempty"`
- FollowingURL *string `json:"following_url,omitempty"`
- GistsURL *string `json:"gists_url,omitempty"`
- StarredURL *string `json:"starred_url,omitempty"`
- SubscriptionsURL *string `json:"subscriptions_url,omitempty"`
- OrganizationsURL *string `json:"organizations_url,omitempty"`
- ReposURL *string `json:"repos_url,omitempty"`
- EventsURL *string `json:"events_url,omitempty"`
- ReceivedEventsURL *string `json:"received_events_url,omitempty"`
- Type *string `json:"type,omitempty"`
- SiteAdmin *bool `json:"site_admin"`
- Contributions *int `json:"contributions,omitempty"`
-}
-
-// ListContributorsOptions specifies the optional parameters to the
-// RepositoriesService.ListContributors method.
-type ListContributorsOptions struct {
- // Include anonymous contributors in results or not
- Anon string `url:"anon,omitempty"`
-
- ListOptions
-}
-
-// ListContributors lists contributors for a repository.
-//
-// GitHub API docs: http://developer.github.com/v3/repos/#list-contributors
-func (s *RepositoriesService) ListContributors(owner string, repository string, opt *ListContributorsOptions) ([]*Contributor, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/contributors", owner, repository)
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- contributor := new([]*Contributor)
- resp, err := s.client.Do(req, contributor)
- if err != nil {
- return nil, nil, err
- }
-
- return *contributor, resp, err
-}
-
-// ListLanguages lists languages for the specified repository. The returned map
-// specifies the languages and the number of bytes of code written in that
-// language. For example:
-//
-// {
-// "C": 78769,
-// "Python": 7769
-// }
-//
-// GitHub API Docs: http://developer.github.com/v3/repos/#list-languages
-func (s *RepositoriesService) ListLanguages(owner string, repo string) (map[string]int, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/languages", owner, repo)
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- languages := make(map[string]int)
- resp, err := s.client.Do(req, &languages)
- if err != nil {
- return nil, resp, err
- }
-
- return languages, resp, err
-}
-
-// ListTeams lists the teams for the specified repository.
-//
-// GitHub API docs: https://developer.github.com/v3/repos/#list-teams
-func (s *RepositoriesService) ListTeams(owner string, repo string, opt *ListOptions) ([]*Team, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/teams", owner, repo)
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- teams := new([]*Team)
- resp, err := s.client.Do(req, teams)
- if err != nil {
- return nil, resp, err
- }
-
- return *teams, resp, err
-}
-
-// RepositoryTag represents a repository tag.
-type RepositoryTag struct {
- Name *string `json:"name,omitempty"`
- Commit *Commit `json:"commit,omitempty"`
- ZipballURL *string `json:"zipball_url,omitempty"`
- TarballURL *string `json:"tarball_url,omitempty"`
-}
-
-// ListTags lists tags for the specified repository.
-//
-// GitHub API docs: https://developer.github.com/v3/repos/#list-tags
-func (s *RepositoriesService) ListTags(owner string, repo string, opt *ListOptions) ([]*RepositoryTag, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/tags", owner, repo)
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- tags := new([]*RepositoryTag)
- resp, err := s.client.Do(req, tags)
- if err != nil {
- return nil, resp, err
- }
-
- return *tags, resp, err
-}
-
-// Branch represents a repository branch
-type Branch struct {
- Name *string `json:"name,omitempty"`
- Commit *Commit `json:"commit,omitempty"`
- Protection *Protection `json:"protection,omitempty"`
-}
-
-// Protection represents a repository branch's protection
-type Protection struct {
- Enabled *bool `json:"enabled,omitempty"`
- RequiredStatusChecks *RequiredStatusChecks `json:"required_status_checks,omitempty"`
-}
-
-// RequiredStatusChecks represents the protection status of a individual branch
-type RequiredStatusChecks struct {
- // Who required status checks apply to.
- // Possible values are:
- // off
- // non_admins
- // everyone
- EnforcementLevel *string `json:"enforcement_level,omitempty"`
- // The list of status checks which are required
- Contexts *[]string `json:"contexts,omitempty"`
-}
-
-// ListBranches lists branches for the specified repository.
-//
-// GitHub API docs: http://developer.github.com/v3/repos/#list-branches
-func (s *RepositoriesService) ListBranches(owner string, repo string, opt *ListOptions) ([]*Branch, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/branches", owner, repo)
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- req.Header.Set("Accept", mediaTypeProtectedBranchesPreview)
-
- branches := new([]*Branch)
- resp, err := s.client.Do(req, branches)
- if err != nil {
- return nil, resp, err
- }
-
- return *branches, resp, err
-}
-
-// GetBranch gets the specified branch for a repository.
-//
-// GitHub API docs: https://developer.github.com/v3/repos/#get-branch
-func (s *RepositoriesService) GetBranch(owner, repo, branch string) (*Branch, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/branches/%v", owner, repo, branch)
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- req.Header.Set("Accept", mediaTypeProtectedBranchesPreview)
-
- b := new(Branch)
- resp, err := s.client.Do(req, b)
- if err != nil {
- return nil, resp, err
- }
-
- return b, resp, err
-}
-
-// EditBranch edits the branch (currently only Branch Protection)
-//
-// GitHub API docs: https://developer.github.com/v3/repos/#enabling-and-disabling-branch-protection
-func (s *RepositoriesService) EditBranch(owner, repo, branchName string, branch *Branch) (*Branch, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/branches/%v", owner, repo, branchName)
- req, err := s.client.NewRequest("PATCH", u, branch)
- if err != nil {
- return nil, nil, err
- }
-
- req.Header.Set("Accept", mediaTypeProtectedBranchesPreview)
-
- b := new(Branch)
- resp, err := s.client.Do(req, b)
- if err != nil {
- return nil, resp, err
- }
-
- return b, resp, err
-}
-
-// License gets the contents of a repository's license if one is detected.
-//
-// GitHub API docs: https://developer.github.com/v3/licenses/#get-the-contents-of-a-repositorys-license
-func (s *RepositoriesService) License(owner, repo string) (*License, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/license", owner, repo)
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- r := &Repository{}
- resp, err := s.client.Do(req, r)
- if err != nil {
- return nil, resp, err
- }
-
- return r.License, resp, err
-}
diff --git a/vendor/src/github.com/google/go-github/github/repos_collaborators.go b/vendor/src/github.com/google/go-github/github/repos_collaborators.go
deleted file mode 100644
index 68a9f46..0000000
--- a/vendor/src/github.com/google/go-github/github/repos_collaborators.go
+++ /dev/null
@@ -1,92 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import "fmt"
-
-// ListCollaborators lists the Github users that have access to the repository.
-//
-// GitHub API docs: http://developer.github.com/v3/repos/collaborators/#list
-func (s *RepositoriesService) ListCollaborators(owner, repo string, opt *ListOptions) ([]*User, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/collaborators", owner, repo)
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- users := new([]*User)
- resp, err := s.client.Do(req, users)
- if err != nil {
- return nil, resp, err
- }
-
- return *users, resp, err
-}
-
-// IsCollaborator checks whether the specified Github user has collaborator
-// access to the given repo.
-// Note: This will return false if the user is not a collaborator OR the user
-// is not a GitHub user.
-//
-// GitHub API docs: http://developer.github.com/v3/repos/collaborators/#get
-func (s *RepositoriesService) IsCollaborator(owner, repo, user string) (bool, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/collaborators/%v", owner, repo, user)
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return false, nil, err
- }
-
- resp, err := s.client.Do(req, nil)
- isCollab, err := parseBoolResponse(err)
- return isCollab, resp, err
-}
-
-// RepositoryAddCollaboratorOptions specifies the optional parameters to the
-// RepositoriesService.AddCollaborator method.
-type RepositoryAddCollaboratorOptions struct {
- // Permission specifies the permission to grant the user on this repository.
- // Possible values are:
- // pull - team members can pull, but not push to or administer this repository
- // push - team members can pull and push, but not administer this repository
- // admin - team members can pull, push and administer this repository
- //
- // Default value is "push". This option is only valid for organization-owned repositories.
- Permission string `json:"permission,omitempty"`
-}
-
-// AddCollaborator adds the specified Github user as collaborator to the given repo.
-//
-// GitHub API docs: https://developer.github.com/v3/repos/collaborators/#add-user-as-a-collaborator
-func (s *RepositoriesService) AddCollaborator(owner, repo, user string, opt *RepositoryAddCollaboratorOptions) (*Response, error) {
- u := fmt.Sprintf("repos/%v/%v/collaborators/%v", owner, repo, user)
- req, err := s.client.NewRequest("PUT", u, opt)
- if err != nil {
- return nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeRepositoryInvitationsPreview)
-
- return s.client.Do(req, nil)
-}
-
-// RemoveCollaborator removes the specified Github user as collaborator from the given repo.
-// Note: Does not return error if a valid user that is not a collaborator is removed.
-//
-// GitHub API docs: http://developer.github.com/v3/repos/collaborators/#remove-collaborator
-func (s *RepositoriesService) RemoveCollaborator(owner, repo, user string) (*Response, error) {
- u := fmt.Sprintf("repos/%v/%v/collaborators/%v", owner, repo, user)
- req, err := s.client.NewRequest("DELETE", u, nil)
- if err != nil {
- return nil, err
- }
- return s.client.Do(req, nil)
-}
diff --git a/vendor/src/github.com/google/go-github/github/repos_collaborators_test.go b/vendor/src/github.com/google/go-github/github/repos_collaborators_test.go
deleted file mode 100644
index 75e3cf3..0000000
--- a/vendor/src/github.com/google/go-github/github/repos_collaborators_test.go
+++ /dev/null
@@ -1,134 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "encoding/json"
- "fmt"
- "net/http"
- "reflect"
- "testing"
-)
-
-func TestRepositoriesService_ListCollaborators(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/collaborators", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testFormValues(t, r, values{"page": "2"})
- fmt.Fprintf(w, `[{"id":1}, {"id":2}]`)
- })
-
- opt := &ListOptions{Page: 2}
- users, _, err := client.Repositories.ListCollaborators("o", "r", opt)
- if err != nil {
- t.Errorf("Repositories.ListCollaborators returned error: %v", err)
- }
-
- want := []*User{{ID: Int(1)}, {ID: Int(2)}}
- if !reflect.DeepEqual(users, want) {
- t.Errorf("Repositories.ListCollaborators returned %+v, want %+v", users, want)
- }
-}
-
-func TestRepositoriesService_ListCollaborators_invalidOwner(t *testing.T) {
- _, _, err := client.Repositories.ListCollaborators("%", "%", nil)
- testURLParseError(t, err)
-}
-
-func TestRepositoriesService_IsCollaborator_True(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/collaborators/u", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- w.WriteHeader(http.StatusNoContent)
- })
-
- isCollab, _, err := client.Repositories.IsCollaborator("o", "r", "u")
- if err != nil {
- t.Errorf("Repositories.IsCollaborator returned error: %v", err)
- }
-
- if !isCollab {
- t.Errorf("Repositories.IsCollaborator returned false, want true")
- }
-}
-
-func TestRepositoriesService_IsCollaborator_False(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/collaborators/u", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- w.WriteHeader(http.StatusNotFound)
- })
-
- isCollab, _, err := client.Repositories.IsCollaborator("o", "r", "u")
- if err != nil {
- t.Errorf("Repositories.IsCollaborator returned error: %v", err)
- }
-
- if isCollab {
- t.Errorf("Repositories.IsCollaborator returned true, want false")
- }
-}
-
-func TestRepositoriesService_IsCollaborator_invalidUser(t *testing.T) {
- _, _, err := client.Repositories.IsCollaborator("%", "%", "%")
- testURLParseError(t, err)
-}
-
-func TestRepositoriesService_AddCollaborator(t *testing.T) {
- setup()
- defer teardown()
-
- opt := &RepositoryAddCollaboratorOptions{Permission: "admin"}
-
- mux.HandleFunc("/repos/o/r/collaborators/u", func(w http.ResponseWriter, r *http.Request) {
- v := new(RepositoryAddCollaboratorOptions)
- json.NewDecoder(r.Body).Decode(v)
-
- testMethod(t, r, "PUT")
- testHeader(t, r, "Accept", mediaTypeRepositoryInvitationsPreview)
- if !reflect.DeepEqual(v, opt) {
- t.Errorf("Request body = %+v, want %+v", v, opt)
- }
-
- w.WriteHeader(http.StatusNoContent)
- })
-
- _, err := client.Repositories.AddCollaborator("o", "r", "u", opt)
- if err != nil {
- t.Errorf("Repositories.AddCollaborator returned error: %v", err)
- }
-}
-
-func TestRepositoriesService_AddCollaborator_invalidUser(t *testing.T) {
- _, err := client.Repositories.AddCollaborator("%", "%", "%", nil)
- testURLParseError(t, err)
-}
-
-func TestRepositoriesService_RemoveCollaborator(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/collaborators/u", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "DELETE")
- w.WriteHeader(http.StatusNoContent)
- })
-
- _, err := client.Repositories.RemoveCollaborator("o", "r", "u")
- if err != nil {
- t.Errorf("Repositories.RemoveCollaborator returned error: %v", err)
- }
-}
-
-func TestRepositoriesService_RemoveCollaborator_invalidUser(t *testing.T) {
- _, err := client.Repositories.RemoveCollaborator("%", "%", "%")
- testURLParseError(t, err)
-}
diff --git a/vendor/src/github.com/google/go-github/github/repos_comments.go b/vendor/src/github.com/google/go-github/github/repos_comments.go
deleted file mode 100644
index 34a8d02..0000000
--- a/vendor/src/github.com/google/go-github/github/repos_comments.go
+++ /dev/null
@@ -1,160 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "fmt"
- "time"
-)
-
-// RepositoryComment represents a comment for a commit, file, or line in a repository.
-type RepositoryComment struct {
- HTMLURL *string `json:"html_url,omitempty"`
- URL *string `json:"url,omitempty"`
- ID *int `json:"id,omitempty"`
- CommitID *string `json:"commit_id,omitempty"`
- User *User `json:"user,omitempty"`
- Reactions *Reactions `json:"reactions,omitempty"`
- CreatedAt *time.Time `json:"created_at,omitempty"`
- UpdatedAt *time.Time `json:"updated_at,omitempty"`
-
- // User-mutable fields
- Body *string `json:"body"`
- // User-initialized fields
- Path *string `json:"path,omitempty"`
- Position *int `json:"position,omitempty"`
-}
-
-func (r RepositoryComment) String() string {
- return Stringify(r)
-}
-
-// ListComments lists all the comments for the repository.
-//
-// GitHub API docs: http://developer.github.com/v3/repos/comments/#list-commit-comments-for-a-repository
-func (s *RepositoriesService) ListComments(owner, repo string, opt *ListOptions) ([]*RepositoryComment, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/comments", owner, repo)
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeReactionsPreview)
-
- comments := new([]*RepositoryComment)
- resp, err := s.client.Do(req, comments)
- if err != nil {
- return nil, resp, err
- }
-
- return *comments, resp, err
-}
-
-// ListCommitComments lists all the comments for a given commit SHA.
-//
-// GitHub API docs: http://developer.github.com/v3/repos/comments/#list-comments-for-a-single-commit
-func (s *RepositoriesService) ListCommitComments(owner, repo, sha string, opt *ListOptions) ([]*RepositoryComment, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/commits/%v/comments", owner, repo, sha)
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeReactionsPreview)
-
- comments := new([]*RepositoryComment)
- resp, err := s.client.Do(req, comments)
- if err != nil {
- return nil, resp, err
- }
-
- return *comments, resp, err
-}
-
-// CreateComment creates a comment for the given commit.
-// Note: GitHub allows for comments to be created for non-existing files and positions.
-//
-// GitHub API docs: http://developer.github.com/v3/repos/comments/#create-a-commit-comment
-func (s *RepositoriesService) CreateComment(owner, repo, sha string, comment *RepositoryComment) (*RepositoryComment, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/commits/%v/comments", owner, repo, sha)
- req, err := s.client.NewRequest("POST", u, comment)
- if err != nil {
- return nil, nil, err
- }
-
- c := new(RepositoryComment)
- resp, err := s.client.Do(req, c)
- if err != nil {
- return nil, resp, err
- }
-
- return c, resp, err
-}
-
-// GetComment gets a single comment from a repository.
-//
-// GitHub API docs: http://developer.github.com/v3/repos/comments/#get-a-single-commit-comment
-func (s *RepositoriesService) GetComment(owner, repo string, id int) (*RepositoryComment, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/comments/%v", owner, repo, id)
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeReactionsPreview)
-
- c := new(RepositoryComment)
- resp, err := s.client.Do(req, c)
- if err != nil {
- return nil, resp, err
- }
-
- return c, resp, err
-}
-
-// UpdateComment updates the body of a single comment.
-//
-// GitHub API docs: http://developer.github.com/v3/repos/comments/#update-a-commit-comment
-func (s *RepositoriesService) UpdateComment(owner, repo string, id int, comment *RepositoryComment) (*RepositoryComment, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/comments/%v", owner, repo, id)
- req, err := s.client.NewRequest("PATCH", u, comment)
- if err != nil {
- return nil, nil, err
- }
-
- c := new(RepositoryComment)
- resp, err := s.client.Do(req, c)
- if err != nil {
- return nil, resp, err
- }
-
- return c, resp, err
-}
-
-// DeleteComment deletes a single comment from a repository.
-//
-// GitHub API docs: http://developer.github.com/v3/repos/comments/#delete-a-commit-comment
-func (s *RepositoriesService) DeleteComment(owner, repo string, id int) (*Response, error) {
- u := fmt.Sprintf("repos/%v/%v/comments/%v", owner, repo, id)
- req, err := s.client.NewRequest("DELETE", u, nil)
- if err != nil {
- return nil, err
- }
- return s.client.Do(req, nil)
-}
diff --git a/vendor/src/github.com/google/go-github/github/repos_comments_test.go b/vendor/src/github.com/google/go-github/github/repos_comments_test.go
deleted file mode 100644
index 924a9a4..0000000
--- a/vendor/src/github.com/google/go-github/github/repos_comments_test.go
+++ /dev/null
@@ -1,183 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "encoding/json"
- "fmt"
- "net/http"
- "reflect"
- "testing"
-)
-
-func TestRepositoriesService_ListComments(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/comments", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testHeader(t, r, "Accept", mediaTypeReactionsPreview)
- testFormValues(t, r, values{"page": "2"})
- fmt.Fprint(w, `[{"id":1}, {"id":2}]`)
- })
-
- opt := &ListOptions{Page: 2}
- comments, _, err := client.Repositories.ListComments("o", "r", opt)
- if err != nil {
- t.Errorf("Repositories.ListComments returned error: %v", err)
- }
-
- want := []*RepositoryComment{{ID: Int(1)}, {ID: Int(2)}}
- if !reflect.DeepEqual(comments, want) {
- t.Errorf("Repositories.ListComments returned %+v, want %+v", comments, want)
- }
-}
-
-func TestRepositoriesService_ListComments_invalidOwner(t *testing.T) {
- _, _, err := client.Repositories.ListComments("%", "%", nil)
- testURLParseError(t, err)
-}
-
-func TestRepositoriesService_ListCommitComments(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/commits/s/comments", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testHeader(t, r, "Accept", mediaTypeReactionsPreview)
- testFormValues(t, r, values{"page": "2"})
- fmt.Fprint(w, `[{"id":1}, {"id":2}]`)
- })
-
- opt := &ListOptions{Page: 2}
- comments, _, err := client.Repositories.ListCommitComments("o", "r", "s", opt)
- if err != nil {
- t.Errorf("Repositories.ListCommitComments returned error: %v", err)
- }
-
- want := []*RepositoryComment{{ID: Int(1)}, {ID: Int(2)}}
- if !reflect.DeepEqual(comments, want) {
- t.Errorf("Repositories.ListCommitComments returned %+v, want %+v", comments, want)
- }
-}
-
-func TestRepositoriesService_ListCommitComments_invalidOwner(t *testing.T) {
- _, _, err := client.Repositories.ListCommitComments("%", "%", "%", nil)
- testURLParseError(t, err)
-}
-
-func TestRepositoriesService_CreateComment(t *testing.T) {
- setup()
- defer teardown()
-
- input := &RepositoryComment{Body: String("b")}
-
- mux.HandleFunc("/repos/o/r/commits/s/comments", func(w http.ResponseWriter, r *http.Request) {
- v := new(RepositoryComment)
- json.NewDecoder(r.Body).Decode(v)
-
- testMethod(t, r, "POST")
- if !reflect.DeepEqual(v, input) {
- t.Errorf("Request body = %+v, want %+v", v, input)
- }
-
- fmt.Fprint(w, `{"id":1}`)
- })
-
- comment, _, err := client.Repositories.CreateComment("o", "r", "s", input)
- if err != nil {
- t.Errorf("Repositories.CreateComment returned error: %v", err)
- }
-
- want := &RepositoryComment{ID: Int(1)}
- if !reflect.DeepEqual(comment, want) {
- t.Errorf("Repositories.CreateComment returned %+v, want %+v", comment, want)
- }
-}
-
-func TestRepositoriesService_CreateComment_invalidOwner(t *testing.T) {
- _, _, err := client.Repositories.CreateComment("%", "%", "%", nil)
- testURLParseError(t, err)
-}
-
-func TestRepositoriesService_GetComment(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/comments/1", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testHeader(t, r, "Accept", mediaTypeReactionsPreview)
- fmt.Fprint(w, `{"id":1}`)
- })
-
- comment, _, err := client.Repositories.GetComment("o", "r", 1)
- if err != nil {
- t.Errorf("Repositories.GetComment returned error: %v", err)
- }
-
- want := &RepositoryComment{ID: Int(1)}
- if !reflect.DeepEqual(comment, want) {
- t.Errorf("Repositories.GetComment returned %+v, want %+v", comment, want)
- }
-}
-
-func TestRepositoriesService_GetComment_invalidOwner(t *testing.T) {
- _, _, err := client.Repositories.GetComment("%", "%", 1)
- testURLParseError(t, err)
-}
-
-func TestRepositoriesService_UpdateComment(t *testing.T) {
- setup()
- defer teardown()
-
- input := &RepositoryComment{Body: String("b")}
-
- mux.HandleFunc("/repos/o/r/comments/1", func(w http.ResponseWriter, r *http.Request) {
- v := new(RepositoryComment)
- json.NewDecoder(r.Body).Decode(v)
-
- testMethod(t, r, "PATCH")
- if !reflect.DeepEqual(v, input) {
- t.Errorf("Request body = %+v, want %+v", v, input)
- }
-
- fmt.Fprint(w, `{"id":1}`)
- })
-
- comment, _, err := client.Repositories.UpdateComment("o", "r", 1, input)
- if err != nil {
- t.Errorf("Repositories.UpdateComment returned error: %v", err)
- }
-
- want := &RepositoryComment{ID: Int(1)}
- if !reflect.DeepEqual(comment, want) {
- t.Errorf("Repositories.UpdateComment returned %+v, want %+v", comment, want)
- }
-}
-
-func TestRepositoriesService_UpdateComment_invalidOwner(t *testing.T) {
- _, _, err := client.Repositories.UpdateComment("%", "%", 1, nil)
- testURLParseError(t, err)
-}
-
-func TestRepositoriesService_DeleteComment(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/comments/1", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "DELETE")
- })
-
- _, err := client.Repositories.DeleteComment("o", "r", 1)
- if err != nil {
- t.Errorf("Repositories.DeleteComment returned error: %v", err)
- }
-}
-
-func TestRepositoriesService_DeleteComment_invalidOwner(t *testing.T) {
- _, err := client.Repositories.DeleteComment("%", "%", 1)
- testURLParseError(t, err)
-}
diff --git a/vendor/src/github.com/google/go-github/github/repos_commits.go b/vendor/src/github.com/google/go-github/github/repos_commits.go
deleted file mode 100644
index b5e6856..0000000
--- a/vendor/src/github.com/google/go-github/github/repos_commits.go
+++ /dev/null
@@ -1,198 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "bytes"
- "fmt"
- "time"
-)
-
-// RepositoryCommit represents a commit in a repo.
-// Note that it's wrapping a Commit, so author/committer information is in two places,
-// but contain different details about them: in RepositoryCommit "github details", in Commit - "git details".
-type RepositoryCommit struct {
- SHA *string `json:"sha,omitempty"`
- Commit *Commit `json:"commit,omitempty"`
- Author *User `json:"author,omitempty"`
- Committer *User `json:"committer,omitempty"`
- Parents []Commit `json:"parents,omitempty"`
- Message *string `json:"message,omitempty"`
- HTMLURL *string `json:"html_url,omitempty"`
-
- // Details about how many changes were made in this commit. Only filled in during GetCommit!
- Stats *CommitStats `json:"stats,omitempty"`
- // Details about which files, and how this commit touched. Only filled in during GetCommit!
- Files []CommitFile `json:"files,omitempty"`
-}
-
-func (r RepositoryCommit) String() string {
- return Stringify(r)
-}
-
-// CommitStats represents the number of additions / deletions from a file in a given RepositoryCommit or GistCommit.
-type CommitStats struct {
- Additions *int `json:"additions,omitempty"`
- Deletions *int `json:"deletions,omitempty"`
- Total *int `json:"total,omitempty"`
-}
-
-func (c CommitStats) String() string {
- return Stringify(c)
-}
-
-// CommitFile represents a file modified in a commit.
-type CommitFile struct {
- SHA *string `json:"sha,omitempty"`
- Filename *string `json:"filename,omitempty"`
- Additions *int `json:"additions,omitempty"`
- Deletions *int `json:"deletions,omitempty"`
- Changes *int `json:"changes,omitempty"`
- Status *string `json:"status,omitempty"`
- Patch *string `json:"patch,omitempty"`
-}
-
-func (c CommitFile) String() string {
- return Stringify(c)
-}
-
-// CommitsComparison is the result of comparing two commits.
-// See CompareCommits() for details.
-type CommitsComparison struct {
- BaseCommit *RepositoryCommit `json:"base_commit,omitempty"`
- MergeBaseCommit *RepositoryCommit `json:"merge_base_commit,omitempty"`
-
- // Head can be 'behind' or 'ahead'
- Status *string `json:"status,omitempty"`
- AheadBy *int `json:"ahead_by,omitempty"`
- BehindBy *int `json:"behind_by,omitempty"`
- TotalCommits *int `json:"total_commits,omitempty"`
-
- Commits []RepositoryCommit `json:"commits,omitempty"`
-
- Files []CommitFile `json:"files,omitempty"`
-}
-
-func (c CommitsComparison) String() string {
- return Stringify(c)
-}
-
-// CommitsListOptions specifies the optional parameters to the
-// RepositoriesService.ListCommits method.
-type CommitsListOptions struct {
- // SHA or branch to start listing Commits from.
- SHA string `url:"sha,omitempty"`
-
- // Path that should be touched by the returned Commits.
- Path string `url:"path,omitempty"`
-
- // Author of by which to filter Commits.
- Author string `url:"author,omitempty"`
-
- // Since when should Commits be included in the response.
- Since time.Time `url:"since,omitempty"`
-
- // Until when should Commits be included in the response.
- Until time.Time `url:"until,omitempty"`
-
- ListOptions
-}
-
-// ListCommits lists the commits of a repository.
-//
-// GitHub API docs: http://developer.github.com/v3/repos/commits/#list
-func (s *RepositoriesService) ListCommits(owner, repo string, opt *CommitsListOptions) ([]*RepositoryCommit, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/commits", owner, repo)
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- commits := new([]*RepositoryCommit)
- resp, err := s.client.Do(req, commits)
- if err != nil {
- return nil, resp, err
- }
-
- return *commits, resp, err
-}
-
-// GetCommit fetches the specified commit, including all details about it.
-// todo: support media formats - https://github.com/google/go-github/issues/6
-//
-// GitHub API docs: http://developer.github.com/v3/repos/commits/#get-a-single-commit
-// See also: http://developer.github.com//v3/git/commits/#get-a-single-commit provides the same functionality
-func (s *RepositoriesService) GetCommit(owner, repo, sha string) (*RepositoryCommit, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/commits/%v", owner, repo, sha)
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeGitSigningPreview)
-
- commit := new(RepositoryCommit)
- resp, err := s.client.Do(req, commit)
- if err != nil {
- return nil, resp, err
- }
-
- return commit, resp, err
-}
-
-// GetCommitSHA1 gets the SHA-1 of a commit reference. If a last-known SHA1 is
-// supplied and no new commits have occurred, a 304 Unmodified response is returned.
-//
-// GitHub API docs: https://developer.github.com/v3/repos/commits/#get-the-sha-1-of-a-commit-reference
-func (s *RepositoriesService) GetCommitSHA1(owner, repo, ref, lastSHA string) (string, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/commits/%v", owner, repo, ref)
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return "", nil, err
- }
- if lastSHA != "" {
- req.Header.Set("If-None-Match", `"`+lastSHA+`"`)
- }
-
- req.Header.Set("Accept", mediaTypeV3SHA)
-
- var buf bytes.Buffer
- resp, err := s.client.Do(req, &buf)
- if err != nil {
- return "", resp, err
- }
-
- return buf.String(), resp, err
-}
-
-// CompareCommits compares a range of commits with each other.
-// todo: support media formats - https://github.com/google/go-github/issues/6
-//
-// GitHub API docs: http://developer.github.com/v3/repos/commits/index.html#compare-two-commits
-func (s *RepositoriesService) CompareCommits(owner, repo string, base, head string) (*CommitsComparison, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/compare/%v...%v", owner, repo, base, head)
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- comp := new(CommitsComparison)
- resp, err := s.client.Do(req, comp)
- if err != nil {
- return nil, resp, err
- }
-
- return comp, resp, err
-}
diff --git a/vendor/src/github.com/google/go-github/github/repos_commits_test.go b/vendor/src/github.com/google/go-github/github/repos_commits_test.go
deleted file mode 100644
index 771cd9f..0000000
--- a/vendor/src/github.com/google/go-github/github/repos_commits_test.go
+++ /dev/null
@@ -1,233 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "fmt"
- "net/http"
- "reflect"
- "testing"
- "time"
-)
-
-func TestRepositoriesService_ListCommits(t *testing.T) {
- setup()
- defer teardown()
-
- // given
- mux.HandleFunc("/repos/o/r/commits", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testFormValues(t, r,
- values{
- "sha": "s",
- "path": "p",
- "author": "a",
- "since": "2013-08-01T00:00:00Z",
- "until": "2013-09-03T00:00:00Z",
- })
- fmt.Fprintf(w, `[{"sha": "s"}]`)
- })
-
- opt := &CommitsListOptions{
- SHA: "s",
- Path: "p",
- Author: "a",
- Since: time.Date(2013, time.August, 1, 0, 0, 0, 0, time.UTC),
- Until: time.Date(2013, time.September, 3, 0, 0, 0, 0, time.UTC),
- }
- commits, _, err := client.Repositories.ListCommits("o", "r", opt)
- if err != nil {
- t.Errorf("Repositories.ListCommits returned error: %v", err)
- }
-
- want := []*RepositoryCommit{{SHA: String("s")}}
- if !reflect.DeepEqual(commits, want) {
- t.Errorf("Repositories.ListCommits returned %+v, want %+v", commits, want)
- }
-}
-
-func TestRepositoriesService_GetCommit(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/commits/s", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testHeader(t, r, "Accept", mediaTypeGitSigningPreview)
- fmt.Fprintf(w, `{
- "sha": "s",
- "commit": { "message": "m" },
- "author": { "login": "l" },
- "committer": { "login": "l" },
- "parents": [ { "sha": "s" } ],
- "stats": { "additions": 104, "deletions": 4, "total": 108 },
- "files": [
- {
- "filename": "f",
- "additions": 10,
- "deletions": 2,
- "changes": 12,
- "status": "s",
- "raw_url": "r",
- "blob_url": "b",
- "patch": "p"
- }
- ]
- }`)
- })
-
- commit, _, err := client.Repositories.GetCommit("o", "r", "s")
- if err != nil {
- t.Errorf("Repositories.GetCommit returned error: %v", err)
- }
-
- want := &RepositoryCommit{
- SHA: String("s"),
- Commit: &Commit{
- Message: String("m"),
- },
- Author: &User{
- Login: String("l"),
- },
- Committer: &User{
- Login: String("l"),
- },
- Parents: []Commit{
- {
- SHA: String("s"),
- },
- },
- Stats: &CommitStats{
- Additions: Int(104),
- Deletions: Int(4),
- Total: Int(108),
- },
- Files: []CommitFile{
- {
- Filename: String("f"),
- Additions: Int(10),
- Deletions: Int(2),
- Changes: Int(12),
- Status: String("s"),
- Patch: String("p"),
- },
- },
- }
- if !reflect.DeepEqual(commit, want) {
- t.Errorf("Repositories.GetCommit returned \n%+v, want \n%+v", commit, want)
- }
-}
-
-func TestRepositoriesService_GetCommitSHA1(t *testing.T) {
- setup()
- defer teardown()
- const sha1 = "01234abcde"
-
- mux.HandleFunc("/repos/o/r/commits/master", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testHeader(t, r, "Accept", mediaTypeV3SHA)
-
- fmt.Fprintf(w, sha1)
- })
-
- got, _, err := client.Repositories.GetCommitSHA1("o", "r", "master", "")
- if err != nil {
- t.Errorf("Repositories.GetCommitSHA1 returned error: %v", err)
- }
-
- want := sha1
- if got != want {
- t.Errorf("Repositories.GetCommitSHA1 = %v, want %v", got, want)
- }
-
- mux.HandleFunc("/repos/o/r/commits/tag", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testHeader(t, r, "Accept", mediaTypeV3SHA)
- testHeader(t, r, "If-None-Match", `"`+sha1+`"`)
-
- w.WriteHeader(http.StatusNotModified)
- })
-
- got, _, err = client.Repositories.GetCommitSHA1("o", "r", "tag", sha1)
- if err == nil {
- t.Errorf("Expected HTTP 304 response")
- }
-
- want = ""
- if got != want {
- t.Errorf("Repositories.GetCommitSHA1 = %v, want %v", got, want)
- }
-}
-
-func TestRepositoriesService_CompareCommits(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/compare/b...h", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- fmt.Fprintf(w, `{
- "base_commit": {
- "sha": "s",
- "commit": {
- "author": { "name": "n" },
- "committer": { "name": "n" },
- "message": "m",
- "tree": { "sha": "t" }
- },
- "author": { "login": "n" },
- "committer": { "login": "l" },
- "parents": [ { "sha": "s" } ]
- },
- "status": "s",
- "ahead_by": 1,
- "behind_by": 2,
- "total_commits": 1,
- "commits": [
- {
- "sha": "s",
- "commit": { "author": { "name": "n" } },
- "author": { "login": "l" },
- "committer": { "login": "l" },
- "parents": [ { "sha": "s" } ]
- }
- ],
- "files": [ { "filename": "f" } ]
- }`)
- })
-
- got, _, err := client.Repositories.CompareCommits("o", "r", "b", "h")
- if err != nil {
- t.Errorf("Repositories.CompareCommits returned error: %v", err)
- }
-
- want := &CommitsComparison{
- Status: String("s"),
- AheadBy: Int(1),
- BehindBy: Int(2),
- TotalCommits: Int(1),
- BaseCommit: &RepositoryCommit{
- Commit: &Commit{
- Author: &CommitAuthor{Name: String("n")},
- },
- Author: &User{Login: String("l")},
- Committer: &User{Login: String("l")},
- Message: String("m"),
- },
- Commits: []RepositoryCommit{
- {
- SHA: String("s"),
- },
- },
- Files: []CommitFile{
- {
- Filename: String("f"),
- },
- },
- }
-
- if reflect.DeepEqual(got, want) {
- t.Errorf("Repositories.CompareCommits returned \n%+v, want \n%+v", got, want)
- }
-}
diff --git a/vendor/src/github.com/google/go-github/github/repos_contents.go b/vendor/src/github.com/google/go-github/github/repos_contents.go
deleted file mode 100644
index ebf4d04..0000000
--- a/vendor/src/github.com/google/go-github/github/repos_contents.go
+++ /dev/null
@@ -1,275 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Repository contents API methods.
-// http://developer.github.com/v3/repos/contents/
-
-package github
-
-import (
- "encoding/base64"
- "encoding/json"
- "errors"
- "fmt"
- "io"
- "net/http"
- "net/url"
- "path"
-)
-
-// RepositoryContent represents a file or directory in a github repository.
-type RepositoryContent struct {
- Type *string `json:"type,omitempty"`
- Encoding *string `json:"encoding,omitempty"`
- Size *int `json:"size,omitempty"`
- Name *string `json:"name,omitempty"`
- Path *string `json:"path,omitempty"`
- // Content contains the actual file content, which may be encoded.
- // Callers should call GetContent which will decode the content if
- // necessary.
- Content *string `json:"content,omitempty"`
- SHA *string `json:"sha,omitempty"`
- URL *string `json:"url,omitempty"`
- GitURL *string `json:"git_url,omitempty"`
- HTMLURL *string `json:"html_url,omitempty"`
- DownloadURL *string `json:"download_url,omitempty"`
-}
-
-// RepositoryContentResponse holds the parsed response from CreateFile, UpdateFile, and DeleteFile.
-type RepositoryContentResponse struct {
- Content *RepositoryContent `json:"content,omitempty"`
- Commit `json:"commit,omitempty"`
-}
-
-// RepositoryContentFileOptions specifies optional parameters for CreateFile, UpdateFile, and DeleteFile.
-type RepositoryContentFileOptions struct {
- Message *string `json:"message,omitempty"`
- Content []byte `json:"content,omitempty"` // unencoded
- SHA *string `json:"sha,omitempty"`
- Branch *string `json:"branch,omitempty"`
- Author *CommitAuthor `json:"author,omitempty"`
- Committer *CommitAuthor `json:"committer,omitempty"`
-}
-
-// RepositoryContentGetOptions represents an optional ref parameter, which can be a SHA,
-// branch, or tag
-type RepositoryContentGetOptions struct {
- Ref string `url:"ref,omitempty"`
-}
-
-// String converts RepositoryContent to a string. It's primarily for testing.
-func (r RepositoryContent) String() string {
- return Stringify(r)
-}
-
-// Decode decodes the file content if it is base64 encoded.
-//
-// Deprecated: Use GetContent instead.
-func (r *RepositoryContent) Decode() ([]byte, error) {
- if *r.Encoding != "base64" {
- return nil, errors.New("cannot decode non-base64")
- }
- o, err := base64.StdEncoding.DecodeString(*r.Content)
- if err != nil {
- return nil, err
- }
- return o, nil
-}
-
-// GetContent returns the content of r, decoding it if necessary.
-func (r *RepositoryContent) GetContent() (string, error) {
- var encoding string
- if r.Encoding != nil {
- encoding = *r.Encoding
- }
-
- switch encoding {
- case "base64":
- c, err := base64.StdEncoding.DecodeString(*r.Content)
- return string(c), err
- case "":
- if r.Content == nil {
- return "", nil
- }
- return *r.Content, nil
- default:
- return "", fmt.Errorf("unsupported content encoding: %v", encoding)
- }
-}
-
-// GetReadme gets the Readme file for the repository.
-//
-// GitHub API docs: http://developer.github.com/v3/repos/contents/#get-the-readme
-func (s *RepositoriesService) GetReadme(owner, repo string, opt *RepositoryContentGetOptions) (*RepositoryContent, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/readme", owner, repo)
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
- readme := new(RepositoryContent)
- resp, err := s.client.Do(req, readme)
- if err != nil {
- return nil, resp, err
- }
- return readme, resp, err
-}
-
-// DownloadContents returns an io.ReadCloser that reads the contents of the
-// specified file. This function will work with files of any size, as opposed
-// to GetContents which is limited to 1 Mb files. It is the caller's
-// responsibility to close the ReadCloser.
-func (s *RepositoriesService) DownloadContents(owner, repo, filepath string, opt *RepositoryContentGetOptions) (io.ReadCloser, error) {
- dir := path.Dir(filepath)
- filename := path.Base(filepath)
- _, dirContents, _, err := s.GetContents(owner, repo, dir, opt)
- if err != nil {
- return nil, err
- }
- for _, contents := range dirContents {
- if *contents.Name == filename {
- if contents.DownloadURL == nil || *contents.DownloadURL == "" {
- return nil, fmt.Errorf("No download link found for %s", filepath)
- }
- resp, err := s.client.client.Get(*contents.DownloadURL)
- if err != nil {
- return nil, err
- }
- return resp.Body, nil
- }
- }
- return nil, fmt.Errorf("No file named %s found in %s", filename, dir)
-}
-
-// GetContents can return either the metadata and content of a single file
-// (when path references a file) or the metadata of all the files and/or
-// subdirectories of a directory (when path references a directory). To make it
-// easy to distinguish between both result types and to mimic the API as much
-// as possible, both result types will be returned but only one will contain a
-// value and the other will be nil.
-//
-// GitHub API docs: http://developer.github.com/v3/repos/contents/#get-contents
-func (s *RepositoriesService) GetContents(owner, repo, path string, opt *RepositoryContentGetOptions) (fileContent *RepositoryContent, directoryContent []*RepositoryContent, resp *Response, err error) {
- escapedPath := (&url.URL{Path: path}).String()
- u := fmt.Sprintf("repos/%s/%s/contents/%s", owner, repo, escapedPath)
- u, err = addOptions(u, opt)
- if err != nil {
- return nil, nil, nil, err
- }
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, nil, err
- }
- var rawJSON json.RawMessage
- resp, err = s.client.Do(req, &rawJSON)
- if err != nil {
- return nil, nil, resp, err
- }
- fileUnmarshalError := json.Unmarshal(rawJSON, &fileContent)
- if fileUnmarshalError == nil {
- return fileContent, nil, resp, fileUnmarshalError
- }
- directoryUnmarshalError := json.Unmarshal(rawJSON, &directoryContent)
- if directoryUnmarshalError == nil {
- return nil, directoryContent, resp, directoryUnmarshalError
- }
- return nil, nil, resp, fmt.Errorf("unmarshalling failed for both file and directory content: %s and %s ", fileUnmarshalError, directoryUnmarshalError)
-}
-
-// CreateFile creates a new file in a repository at the given path and returns
-// the commit and file metadata.
-//
-// GitHub API docs: http://developer.github.com/v3/repos/contents/#create-a-file
-func (s *RepositoriesService) CreateFile(owner, repo, path string, opt *RepositoryContentFileOptions) (*RepositoryContentResponse, *Response, error) {
- u := fmt.Sprintf("repos/%s/%s/contents/%s", owner, repo, path)
- req, err := s.client.NewRequest("PUT", u, opt)
- if err != nil {
- return nil, nil, err
- }
- createResponse := new(RepositoryContentResponse)
- resp, err := s.client.Do(req, createResponse)
- if err != nil {
- return nil, resp, err
- }
- return createResponse, resp, err
-}
-
-// UpdateFile updates a file in a repository at the given path and returns the
-// commit and file metadata. Requires the blob SHA of the file being updated.
-//
-// GitHub API docs: http://developer.github.com/v3/repos/contents/#update-a-file
-func (s *RepositoriesService) UpdateFile(owner, repo, path string, opt *RepositoryContentFileOptions) (*RepositoryContentResponse, *Response, error) {
- u := fmt.Sprintf("repos/%s/%s/contents/%s", owner, repo, path)
- req, err := s.client.NewRequest("PUT", u, opt)
- if err != nil {
- return nil, nil, err
- }
- updateResponse := new(RepositoryContentResponse)
- resp, err := s.client.Do(req, updateResponse)
- if err != nil {
- return nil, resp, err
- }
- return updateResponse, resp, err
-}
-
-// DeleteFile deletes a file from a repository and returns the commit.
-// Requires the blob SHA of the file to be deleted.
-//
-// GitHub API docs: http://developer.github.com/v3/repos/contents/#delete-a-file
-func (s *RepositoriesService) DeleteFile(owner, repo, path string, opt *RepositoryContentFileOptions) (*RepositoryContentResponse, *Response, error) {
- u := fmt.Sprintf("repos/%s/%s/contents/%s", owner, repo, path)
- req, err := s.client.NewRequest("DELETE", u, opt)
- if err != nil {
- return nil, nil, err
- }
- deleteResponse := new(RepositoryContentResponse)
- resp, err := s.client.Do(req, deleteResponse)
- if err != nil {
- return nil, resp, err
- }
- return deleteResponse, resp, err
-}
-
-// archiveFormat is used to define the archive type when calling GetArchiveLink.
-type archiveFormat string
-
-const (
- // Tarball specifies an archive in gzipped tar format.
- Tarball archiveFormat = "tarball"
-
- // Zipball specifies an archive in zip format.
- Zipball archiveFormat = "zipball"
-)
-
-// GetArchiveLink returns an URL to download a tarball or zipball archive for a
-// repository. The archiveFormat can be specified by either the github.Tarball
-// or github.Zipball constant.
-//
-// GitHub API docs: http://developer.github.com/v3/repos/contents/#get-archive-link
-func (s *RepositoriesService) GetArchiveLink(owner, repo string, archiveformat archiveFormat, opt *RepositoryContentGetOptions) (*url.URL, *Response, error) {
- u := fmt.Sprintf("repos/%s/%s/%s", owner, repo, archiveformat)
- if opt != nil && opt.Ref != "" {
- u += fmt.Sprintf("/%s", opt.Ref)
- }
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
- var resp *http.Response
- // Use http.DefaultTransport if no custom Transport is configured
- if s.client.client.Transport == nil {
- resp, err = http.DefaultTransport.RoundTrip(req)
- } else {
- resp, err = s.client.client.Transport.RoundTrip(req)
- }
- if err != nil || resp.StatusCode != http.StatusFound {
- return nil, newResponse(resp), err
- }
- parsedURL, err := url.Parse(resp.Header.Get("Location"))
- return parsedURL, newResponse(resp), err
-}
diff --git a/vendor/src/github.com/google/go-github/github/repos_contents_test.go b/vendor/src/github.com/google/go-github/github/repos_contents_test.go
deleted file mode 100644
index 6d025ab..0000000
--- a/vendor/src/github.com/google/go-github/github/repos_contents_test.go
+++ /dev/null
@@ -1,412 +0,0 @@
-// Copyright 2014 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "fmt"
- "io/ioutil"
- "net/http"
- "reflect"
- "testing"
-)
-
-func TestRepositoryContent_Decode(t *testing.T) {
- tests := []struct {
- encoding, content *string // input encoding and content
- want string // desired output
- wantErr bool // whether an error is expected
- }{
- {
- encoding: String("base64"),
- content: String("aGVsbG8="),
- want: "hello",
- wantErr: false,
- },
- {
- encoding: String("bad"),
- content: String("aGVsbG8="),
- want: "",
- wantErr: true,
- },
- }
-
- for _, tt := range tests {
- r := RepositoryContent{Encoding: tt.encoding, Content: tt.content}
- o, err := r.Decode()
- if err != nil && !tt.wantErr {
- t.Errorf("RepositoryContent(%q, %q) returned unexpected error: %v", tt.encoding, tt.content, err)
- }
- if err == nil && tt.wantErr {
- t.Errorf("RepositoryContent(%q, %q) did not return unexpected error", tt.encoding, tt.content)
- }
- if got, want := string(o), tt.want; got != want {
- t.Errorf("RepositoryContent.Decode returned %+v, want %+v", got, want)
- }
- }
-}
-
-func TestRepositoryContent_GetContent(t *testing.T) {
- tests := []struct {
- encoding, content *string // input encoding and content
- want string // desired output
- wantErr bool // whether an error is expected
- }{
- {
- encoding: String(""),
- content: String("hello"),
- want: "hello",
- wantErr: false,
- },
- {
- encoding: nil,
- content: String("hello"),
- want: "hello",
- wantErr: false,
- },
- {
- encoding: nil,
- content: nil,
- want: "",
- wantErr: false,
- },
- {
- encoding: String("base64"),
- content: String("aGVsbG8="),
- want: "hello",
- wantErr: false,
- },
- {
- encoding: String("bad"),
- content: String("aGVsbG8="),
- want: "",
- wantErr: true,
- },
- }
-
- for _, tt := range tests {
- r := RepositoryContent{Encoding: tt.encoding, Content: tt.content}
- got, err := r.GetContent()
- if err != nil && !tt.wantErr {
- t.Errorf("RepositoryContent(%q, %q) returned unexpected error: %v", tt.encoding, tt.content, err)
- }
- if err == nil && tt.wantErr {
- t.Errorf("RepositoryContent(%q, %q) did not return unexpected error", tt.encoding, tt.content)
- }
- if want := tt.want; got != want {
- t.Errorf("RepositoryContent.GetContent returned %+v, want %+v", got, want)
- }
- }
-}
-
-func TestRepositoriesService_GetReadme(t *testing.T) {
- setup()
- defer teardown()
- mux.HandleFunc("/repos/o/r/readme", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- fmt.Fprint(w, `{
- "type": "file",
- "encoding": "base64",
- "size": 5362,
- "name": "README.md",
- "path": "README.md"
- }`)
- })
- readme, _, err := client.Repositories.GetReadme("o", "r", &RepositoryContentGetOptions{})
- if err != nil {
- t.Errorf("Repositories.GetReadme returned error: %v", err)
- }
- want := &RepositoryContent{Type: String("file"), Name: String("README.md"), Size: Int(5362), Encoding: String("base64"), Path: String("README.md")}
- if !reflect.DeepEqual(readme, want) {
- t.Errorf("Repositories.GetReadme returned %+v, want %+v", readme, want)
- }
-}
-
-func TestRepositoriesService_DownloadContents_Success(t *testing.T) {
- setup()
- defer teardown()
- mux.HandleFunc("/repos/o/r/contents/d", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- fmt.Fprint(w, `[{
- "type": "file",
- "name": "f",
- "download_url": "`+server.URL+`/download/f"
- }]`)
- })
- mux.HandleFunc("/download/f", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- fmt.Fprint(w, "foo")
- })
-
- r, err := client.Repositories.DownloadContents("o", "r", "d/f", nil)
- if err != nil {
- t.Errorf("Repositories.DownloadContents returned error: %v", err)
- }
-
- bytes, err := ioutil.ReadAll(r)
- if err != nil {
- t.Errorf("Error reading response body: %v", err)
- }
- r.Close()
-
- if got, want := string(bytes), "foo"; got != want {
- t.Errorf("Repositories.DownloadContents returned %v, want %v", got, want)
- }
-}
-
-func TestRepositoriesService_DownloadContents_NoDownloadURL(t *testing.T) {
- setup()
- defer teardown()
- mux.HandleFunc("/repos/o/r/contents/d", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- fmt.Fprint(w, `[{
- "type": "file",
- "name": "f",
- }]`)
- })
-
- _, err := client.Repositories.DownloadContents("o", "r", "d/f", nil)
- if err == nil {
- t.Errorf("Repositories.DownloadContents did not return expected error")
- }
-}
-
-func TestRepositoriesService_DownloadContents_NoFile(t *testing.T) {
- setup()
- defer teardown()
- mux.HandleFunc("/repos/o/r/contents/d", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- fmt.Fprint(w, `[]`)
- })
-
- _, err := client.Repositories.DownloadContents("o", "r", "d/f", nil)
- if err == nil {
- t.Errorf("Repositories.DownloadContents did not return expected error")
- }
-}
-
-func TestRepositoriesService_GetContents_File(t *testing.T) {
- setup()
- defer teardown()
- mux.HandleFunc("/repos/o/r/contents/p", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- fmt.Fprint(w, `{
- "type": "file",
- "encoding": "base64",
- "size": 20678,
- "name": "LICENSE",
- "path": "LICENSE"
- }`)
- })
- fileContents, _, _, err := client.Repositories.GetContents("o", "r", "p", &RepositoryContentGetOptions{})
- if err != nil {
- t.Errorf("Repositories.GetContents returned error: %v", err)
- }
- want := &RepositoryContent{Type: String("file"), Name: String("LICENSE"), Size: Int(20678), Encoding: String("base64"), Path: String("LICENSE")}
- if !reflect.DeepEqual(fileContents, want) {
- t.Errorf("Repositories.GetContents returned %+v, want %+v", fileContents, want)
- }
-}
-
-func TestRepositoriesService_GetContents_FilenameNeedsEscape(t *testing.T) {
- setup()
- defer teardown()
- mux.HandleFunc("/repos/o/r/contents/p#?%/ä¸.go", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- fmt.Fprint(w, `{}`)
- })
- _, _, _, err := client.Repositories.GetContents("o", "r", "p#?%/ä¸.go", &RepositoryContentGetOptions{})
- if err != nil {
- t.Fatalf("Repositories.GetContents returned error: %v", err)
- }
-}
-
-func TestRepositoriesService_GetContents_DirectoryWithSpaces(t *testing.T) {
- setup()
- defer teardown()
- mux.HandleFunc("/repos/o/r/contents/some directory/file.go", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- fmt.Fprint(w, `{}`)
- })
- _, _, _, err := client.Repositories.GetContents("o", "r", "some directory/file.go", &RepositoryContentGetOptions{})
- if err != nil {
- t.Fatalf("Repositories.GetContents returned error: %v", err)
- }
-}
-
-func TestRepositoriesService_GetContents_DirectoryWithPlusChars(t *testing.T) {
- setup()
- defer teardown()
- mux.HandleFunc("/repos/o/r/contents/some directory+name/file.go", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- fmt.Fprint(w, `{}`)
- })
- _, _, _, err := client.Repositories.GetContents("o", "r", "some directory+name/file.go", &RepositoryContentGetOptions{})
- if err != nil {
- t.Fatalf("Repositories.GetContents returned error: %v", err)
- }
-}
-
-func TestRepositoriesService_GetContents_Directory(t *testing.T) {
- setup()
- defer teardown()
- mux.HandleFunc("/repos/o/r/contents/p", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- fmt.Fprint(w, `[{
- "type": "dir",
- "name": "lib",
- "path": "lib"
- },
- {
- "type": "file",
- "size": 20678,
- "name": "LICENSE",
- "path": "LICENSE"
- }]`)
- })
- _, directoryContents, _, err := client.Repositories.GetContents("o", "r", "p", &RepositoryContentGetOptions{})
- if err != nil {
- t.Errorf("Repositories.GetContents returned error: %v", err)
- }
- want := []*RepositoryContent{{Type: String("dir"), Name: String("lib"), Path: String("lib")},
- {Type: String("file"), Name: String("LICENSE"), Size: Int(20678), Path: String("LICENSE")}}
- if !reflect.DeepEqual(directoryContents, want) {
- t.Errorf("Repositories.GetContents_Directory returned %+v, want %+v", directoryContents, want)
- }
-}
-
-func TestRepositoriesService_CreateFile(t *testing.T) {
- setup()
- defer teardown()
- mux.HandleFunc("/repos/o/r/contents/p", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "PUT")
- fmt.Fprint(w, `{
- "content":{
- "name":"p"
- },
- "commit":{
- "message":"m",
- "sha":"f5f369044773ff9c6383c087466d12adb6fa0828"
- }
- }`)
- })
- message := "m"
- content := []byte("c")
- repositoryContentsOptions := &RepositoryContentFileOptions{
- Message: &message,
- Content: content,
- Committer: &CommitAuthor{Name: String("n"), Email: String("e")},
- }
- createResponse, _, err := client.Repositories.CreateFile("o", "r", "p", repositoryContentsOptions)
- if err != nil {
- t.Errorf("Repositories.CreateFile returned error: %v", err)
- }
- want := &RepositoryContentResponse{
- Content: &RepositoryContent{Name: String("p")},
- Commit: Commit{
- Message: String("m"),
- SHA: String("f5f369044773ff9c6383c087466d12adb6fa0828"),
- },
- }
- if !reflect.DeepEqual(createResponse, want) {
- t.Errorf("Repositories.CreateFile returned %+v, want %+v", createResponse, want)
- }
-}
-
-func TestRepositoriesService_UpdateFile(t *testing.T) {
- setup()
- defer teardown()
- mux.HandleFunc("/repos/o/r/contents/p", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "PUT")
- fmt.Fprint(w, `{
- "content":{
- "name":"p"
- },
- "commit":{
- "message":"m",
- "sha":"f5f369044773ff9c6383c087466d12adb6fa0828"
- }
- }`)
- })
- message := "m"
- content := []byte("c")
- sha := "f5f369044773ff9c6383c087466d12adb6fa0828"
- repositoryContentsOptions := &RepositoryContentFileOptions{
- Message: &message,
- Content: content,
- SHA: &sha,
- Committer: &CommitAuthor{Name: String("n"), Email: String("e")},
- }
- updateResponse, _, err := client.Repositories.UpdateFile("o", "r", "p", repositoryContentsOptions)
- if err != nil {
- t.Errorf("Repositories.UpdateFile returned error: %v", err)
- }
- want := &RepositoryContentResponse{
- Content: &RepositoryContent{Name: String("p")},
- Commit: Commit{
- Message: String("m"),
- SHA: String("f5f369044773ff9c6383c087466d12adb6fa0828"),
- },
- }
- if !reflect.DeepEqual(updateResponse, want) {
- t.Errorf("Repositories.UpdateFile returned %+v, want %+v", updateResponse, want)
- }
-}
-
-func TestRepositoriesService_DeleteFile(t *testing.T) {
- setup()
- defer teardown()
- mux.HandleFunc("/repos/o/r/contents/p", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "DELETE")
- fmt.Fprint(w, `{
- "content": null,
- "commit":{
- "message":"m",
- "sha":"f5f369044773ff9c6383c087466d12adb6fa0828"
- }
- }`)
- })
- message := "m"
- sha := "f5f369044773ff9c6383c087466d12adb6fa0828"
- repositoryContentsOptions := &RepositoryContentFileOptions{
- Message: &message,
- SHA: &sha,
- Committer: &CommitAuthor{Name: String("n"), Email: String("e")},
- }
- deleteResponse, _, err := client.Repositories.DeleteFile("o", "r", "p", repositoryContentsOptions)
- if err != nil {
- t.Errorf("Repositories.DeleteFile returned error: %v", err)
- }
- want := &RepositoryContentResponse{
- Content: nil,
- Commit: Commit{
- Message: String("m"),
- SHA: String("f5f369044773ff9c6383c087466d12adb6fa0828"),
- },
- }
- if !reflect.DeepEqual(deleteResponse, want) {
- t.Errorf("Repositories.DeleteFile returned %+v, want %+v", deleteResponse, want)
- }
-}
-
-func TestRepositoriesService_GetArchiveLink(t *testing.T) {
- setup()
- defer teardown()
- mux.HandleFunc("/repos/o/r/tarball", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- http.Redirect(w, r, "http://github.com/a", http.StatusFound)
- })
- url, resp, err := client.Repositories.GetArchiveLink("o", "r", Tarball, &RepositoryContentGetOptions{})
- if err != nil {
- t.Errorf("Repositories.GetArchiveLink returned error: %v", err)
- }
- if resp.StatusCode != http.StatusFound {
- t.Errorf("Repositories.GetArchiveLink returned status: %d, want %d", resp.StatusCode, http.StatusFound)
- }
- want := "http://github.com/a"
- if url.String() != want {
- t.Errorf("Repositories.GetArchiveLink returned %+v, want %+v", url.String(), want)
- }
-}
diff --git a/vendor/src/github.com/google/go-github/github/repos_deployments.go b/vendor/src/github.com/google/go-github/github/repos_deployments.go
deleted file mode 100644
index f3272b0..0000000
--- a/vendor/src/github.com/google/go-github/github/repos_deployments.go
+++ /dev/null
@@ -1,179 +0,0 @@
-// Copyright 2014 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "encoding/json"
- "fmt"
-)
-
-// Deployment represents a deployment in a repo
-type Deployment struct {
- URL *string `json:"url,omitempty"`
- ID *int `json:"id,omitempty"`
- SHA *string `json:"sha,omitempty"`
- Ref *string `json:"ref,omitempty"`
- Task *string `json:"task,omitempty"`
- Payload json.RawMessage `json:"payload,omitempty"`
- Environment *string `json:"environment,omitempty"`
- Description *string `json:"description,omitempty"`
- Creator *User `json:"creator,omitempty"`
- CreatedAt *Timestamp `json:"created_at,omitempty"`
- UpdatedAt *Timestamp `json:"pushed_at,omitempty"`
- StatusesURL *string `json:"statuses_url,omitempty"`
- RepositoryURL *string `json:"repository_url,omitempty"`
-}
-
-// DeploymentRequest represents a deployment request
-type DeploymentRequest struct {
- Ref *string `json:"ref,omitempty"`
- Task *string `json:"task,omitempty"`
- AutoMerge *bool `json:"auto_merge,omitempty"`
- RequiredContexts *[]string `json:"required_contexts,omitempty"`
- Payload *string `json:"payload,omitempty"`
- Environment *string `json:"environment,omitempty"`
- Description *string `json:"description,omitempty"`
- TransientEnvironment *bool `json:"transient_environment,omitempty"`
- ProductionEnvironment *bool `json:"production_environment,omitempty"`
-}
-
-// DeploymentsListOptions specifies the optional parameters to the
-// RepositoriesService.ListDeployments method.
-type DeploymentsListOptions struct {
- // SHA of the Deployment.
- SHA string `url:"sha,omitempty"`
-
- // List deployments for a given ref.
- Ref string `url:"ref,omitempty"`
-
- // List deployments for a given task.
- Task string `url:"task,omitempty"`
-
- // List deployments for a given environment.
- Environment string `url:"environment,omitempty"`
-
- ListOptions
-}
-
-// ListDeployments lists the deployments of a repository.
-//
-// GitHub API docs: https://developer.github.com/v3/repos/deployments/#list-deployments
-func (s *RepositoriesService) ListDeployments(owner, repo string, opt *DeploymentsListOptions) ([]*Deployment, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/deployments", owner, repo)
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- deployments := new([]*Deployment)
- resp, err := s.client.Do(req, deployments)
- if err != nil {
- return nil, resp, err
- }
-
- return *deployments, resp, err
-}
-
-// CreateDeployment creates a new deployment for a repository.
-//
-// GitHub API docs: https://developer.github.com/v3/repos/deployments/#create-a-deployment
-func (s *RepositoriesService) CreateDeployment(owner, repo string, request *DeploymentRequest) (*Deployment, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/deployments", owner, repo)
-
- req, err := s.client.NewRequest("POST", u, request)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when deployment support fully launches
- req.Header.Set("Accept", mediaTypeDeploymentStatusPreview)
-
- d := new(Deployment)
- resp, err := s.client.Do(req, d)
- if err != nil {
- return nil, resp, err
- }
-
- return d, resp, err
-}
-
-// DeploymentStatus represents the status of a
-// particular deployment.
-type DeploymentStatus struct {
- ID *int `json:"id,omitempty"`
- // State is the deployment state.
- // Possible values are: "pending", "success", "failure", "error", "inactive".
- State *string `json:"state,omitempty"`
- Creator *User `json:"creator,omitempty"`
- Description *string `json:"description,omitempty"`
- TargetURL *string `json:"target_url,omitempty"`
- CreatedAt *Timestamp `json:"created_at,omitempty"`
- UpdatedAt *Timestamp `json:"pushed_at,omitempty"`
- DeploymentURL *string `json:"deployment_url,omitempty"`
- RepositoryURL *string `json:"repository_url,omitempty"`
-}
-
-// DeploymentStatusRequest represents a deployment request
-type DeploymentStatusRequest struct {
- State *string `json:"state,omitempty"`
- TargetURL *string `json:"target_url,omitempty"` // Deprecated. Use LogURL instead.
- LogURL *string `json:"log_url,omitempty"`
- Description *string `json:"description,omitempty"`
- EnvironmentURL *string `json:"environment_url,omitempty"`
- AutoInactive *bool `json:"auto_inactive,omitempty"`
-}
-
-// ListDeploymentStatuses lists the statuses of a given deployment of a repository.
-//
-// GitHub API docs: https://developer.github.com/v3/repos/deployments/#list-deployment-statuses
-func (s *RepositoriesService) ListDeploymentStatuses(owner, repo string, deployment int, opt *ListOptions) ([]*DeploymentStatus, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/deployments/%v/statuses", owner, repo, deployment)
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- statuses := new([]*DeploymentStatus)
- resp, err := s.client.Do(req, statuses)
- if err != nil {
- return nil, resp, err
- }
-
- return *statuses, resp, err
-}
-
-// CreateDeploymentStatus creates a new status for a deployment.
-//
-// GitHub API docs: https://developer.github.com/v3/repos/deployments/#create-a-deployment-status
-func (s *RepositoriesService) CreateDeploymentStatus(owner, repo string, deployment int, request *DeploymentStatusRequest) (*DeploymentStatus, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/deployments/%v/statuses", owner, repo, deployment)
-
- req, err := s.client.NewRequest("POST", u, request)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when deployment support fully launches
- req.Header.Set("Accept", mediaTypeDeploymentStatusPreview)
-
- d := new(DeploymentStatus)
- resp, err := s.client.Do(req, d)
- if err != nil {
- return nil, resp, err
- }
-
- return d, resp, err
-}
diff --git a/vendor/src/github.com/google/go-github/github/repos_deployments_test.go b/vendor/src/github.com/google/go-github/github/repos_deployments_test.go
deleted file mode 100644
index 4d77723..0000000
--- a/vendor/src/github.com/google/go-github/github/repos_deployments_test.go
+++ /dev/null
@@ -1,118 +0,0 @@
-// Copyright 2014 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "encoding/json"
- "fmt"
- "net/http"
- "reflect"
- "testing"
-)
-
-func TestRepositoriesService_ListDeployments(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/deployments", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testFormValues(t, r, values{"environment": "test"})
- fmt.Fprint(w, `[{"id":1}, {"id":2}]`)
- })
-
- opt := &DeploymentsListOptions{Environment: "test"}
- deployments, _, err := client.Repositories.ListDeployments("o", "r", opt)
- if err != nil {
- t.Errorf("Repositories.ListDeployments returned error: %v", err)
- }
-
- want := []*Deployment{{ID: Int(1)}, {ID: Int(2)}}
- if !reflect.DeepEqual(deployments, want) {
- t.Errorf("Repositories.ListDeployments returned %+v, want %+v", deployments, want)
- }
-}
-
-func TestRepositoriesService_CreateDeployment(t *testing.T) {
- setup()
- defer teardown()
-
- input := &DeploymentRequest{Ref: String("1111"), Task: String("deploy"), TransientEnvironment: Bool(true)}
-
- mux.HandleFunc("/repos/o/r/deployments", func(w http.ResponseWriter, r *http.Request) {
- v := new(DeploymentRequest)
- json.NewDecoder(r.Body).Decode(v)
-
- testMethod(t, r, "POST")
- testHeader(t, r, "Accept", mediaTypeDeploymentStatusPreview)
- if !reflect.DeepEqual(v, input) {
- t.Errorf("Request body = %+v, want %+v", v, input)
- }
-
- fmt.Fprint(w, `{"ref": "1111", "task": "deploy"}`)
- })
-
- deployment, _, err := client.Repositories.CreateDeployment("o", "r", input)
- if err != nil {
- t.Errorf("Repositories.CreateDeployment returned error: %v", err)
- }
-
- want := &Deployment{Ref: String("1111"), Task: String("deploy")}
- if !reflect.DeepEqual(deployment, want) {
- t.Errorf("Repositories.CreateDeployment returned %+v, want %+v", deployment, want)
- }
-}
-
-func TestRepositoriesService_ListDeploymentStatuses(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/deployments/1/statuses", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testFormValues(t, r, values{"page": "2"})
- fmt.Fprint(w, `[{"id":1}, {"id":2}]`)
- })
-
- opt := &ListOptions{Page: 2}
- statutses, _, err := client.Repositories.ListDeploymentStatuses("o", "r", 1, opt)
- if err != nil {
- t.Errorf("Repositories.ListDeploymentStatuses returned error: %v", err)
- }
-
- want := []*DeploymentStatus{{ID: Int(1)}, {ID: Int(2)}}
- if !reflect.DeepEqual(statutses, want) {
- t.Errorf("Repositories.ListDeploymentStatuses returned %+v, want %+v", statutses, want)
- }
-}
-
-func TestRepositoriesService_CreateDeploymentStatus(t *testing.T) {
- setup()
- defer teardown()
-
- input := &DeploymentStatusRequest{State: String("inactive"), Description: String("deploy"), AutoInactive: Bool(false)}
-
- mux.HandleFunc("/repos/o/r/deployments/1/statuses", func(w http.ResponseWriter, r *http.Request) {
- v := new(DeploymentStatusRequest)
- json.NewDecoder(r.Body).Decode(v)
-
- testMethod(t, r, "POST")
- testHeader(t, r, "Accept", mediaTypeDeploymentStatusPreview)
- if !reflect.DeepEqual(v, input) {
- t.Errorf("Request body = %+v, want %+v", v, input)
- }
-
- fmt.Fprint(w, `{"state": "inactive", "description": "deploy"}`)
- })
-
- deploymentStatus, _, err := client.Repositories.CreateDeploymentStatus("o", "r", 1, input)
- if err != nil {
- t.Errorf("Repositories.CreateDeploymentStatus returned error: %v", err)
- }
-
- want := &DeploymentStatus{State: String("inactive"), Description: String("deploy")}
- if !reflect.DeepEqual(deploymentStatus, want) {
- t.Errorf("Repositories.CreateDeploymentStatus returned %+v, want %+v", deploymentStatus, want)
- }
-}
diff --git a/vendor/src/github.com/google/go-github/github/repos_forks.go b/vendor/src/github.com/google/go-github/github/repos_forks.go
deleted file mode 100644
index 92e9f27..0000000
--- a/vendor/src/github.com/google/go-github/github/repos_forks.go
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import "fmt"
-
-// RepositoryListForksOptions specifies the optional parameters to the
-// RepositoriesService.ListForks method.
-type RepositoryListForksOptions struct {
- // How to sort the forks list. Possible values are: newest, oldest,
- // watchers. Default is "newest".
- Sort string `url:"sort,omitempty"`
-
- ListOptions
-}
-
-// ListForks lists the forks of the specified repository.
-//
-// GitHub API docs: http://developer.github.com/v3/repos/forks/#list-forks
-func (s *RepositoriesService) ListForks(owner, repo string, opt *RepositoryListForksOptions) ([]*Repository, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/forks", owner, repo)
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- repos := new([]*Repository)
- resp, err := s.client.Do(req, repos)
- if err != nil {
- return nil, resp, err
- }
-
- return *repos, resp, err
-}
-
-// RepositoryCreateForkOptions specifies the optional parameters to the
-// RepositoriesService.CreateFork method.
-type RepositoryCreateForkOptions struct {
- // The organization to fork the repository into.
- Organization string `url:"organization,omitempty"`
-}
-
-// CreateFork creates a fork of the specified repository.
-//
-// GitHub API docs: http://developer.github.com/v3/repos/forks/#list-forks
-func (s *RepositoriesService) CreateFork(owner, repo string, opt *RepositoryCreateForkOptions) (*Repository, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/forks", owner, repo)
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("POST", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- fork := new(Repository)
- resp, err := s.client.Do(req, fork)
- if err != nil {
- return nil, resp, err
- }
-
- return fork, resp, err
-}
diff --git a/vendor/src/github.com/google/go-github/github/repos_forks_test.go b/vendor/src/github.com/google/go-github/github/repos_forks_test.go
deleted file mode 100644
index 3d2baa5..0000000
--- a/vendor/src/github.com/google/go-github/github/repos_forks_test.go
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "fmt"
- "net/http"
- "reflect"
- "testing"
-)
-
-func TestRepositoriesService_ListForks(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/forks", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testFormValues(t, r, values{
- "sort": "newest",
- "page": "3",
- })
- fmt.Fprint(w, `[{"id":1},{"id":2}]`)
- })
-
- opt := &RepositoryListForksOptions{
- Sort: "newest",
- ListOptions: ListOptions{Page: 3},
- }
- repos, _, err := client.Repositories.ListForks("o", "r", opt)
- if err != nil {
- t.Errorf("Repositories.ListForks returned error: %v", err)
- }
-
- want := []*Repository{{ID: Int(1)}, {ID: Int(2)}}
- if !reflect.DeepEqual(repos, want) {
- t.Errorf("Repositories.ListForks returned %+v, want %+v", repos, want)
- }
-}
-
-func TestRepositoriesService_ListForks_invalidOwner(t *testing.T) {
- _, _, err := client.Repositories.ListForks("%", "r", nil)
- testURLParseError(t, err)
-}
-
-func TestRepositoriesService_CreateFork(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/forks", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "POST")
- testFormValues(t, r, values{"organization": "o"})
- fmt.Fprint(w, `{"id":1}`)
- })
-
- opt := &RepositoryCreateForkOptions{Organization: "o"}
- repo, _, err := client.Repositories.CreateFork("o", "r", opt)
- if err != nil {
- t.Errorf("Repositories.CreateFork returned error: %v", err)
- }
-
- want := &Repository{ID: Int(1)}
- if !reflect.DeepEqual(repo, want) {
- t.Errorf("Repositories.CreateFork returned %+v, want %+v", repo, want)
- }
-}
-
-func TestRepositoriesService_CreateFork_invalidOwner(t *testing.T) {
- _, _, err := client.Repositories.CreateFork("%", "r", nil)
- testURLParseError(t, err)
-}
diff --git a/vendor/src/github.com/google/go-github/github/repos_hooks.go b/vendor/src/github.com/google/go-github/github/repos_hooks.go
deleted file mode 100644
index fe725b4..0000000
--- a/vendor/src/github.com/google/go-github/github/repos_hooks.go
+++ /dev/null
@@ -1,196 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "fmt"
- "time"
-)
-
-// WebHookPayload represents the data that is received from GitHub when a push
-// event hook is triggered. The format of these payloads pre-date most of the
-// GitHub v3 API, so there are lots of minor incompatibilities with the types
-// defined in the rest of the API. Therefore, several types are duplicated
-// here to account for these differences.
-//
-// GitHub API docs: https://help.github.com/articles/post-receive-hooks
-type WebHookPayload struct {
- After *string `json:"after,omitempty"`
- Before *string `json:"before,omitempty"`
- Commits []WebHookCommit `json:"commits,omitempty"`
- Compare *string `json:"compare,omitempty"`
- Created *bool `json:"created,omitempty"`
- Deleted *bool `json:"deleted,omitempty"`
- Forced *bool `json:"forced,omitempty"`
- HeadCommit *WebHookCommit `json:"head_commit,omitempty"`
- Pusher *User `json:"pusher,omitempty"`
- Ref *string `json:"ref,omitempty"`
- Repo *Repository `json:"repository,omitempty"`
- Sender *User `json:"sender,omitempty"`
-}
-
-func (w WebHookPayload) String() string {
- return Stringify(w)
-}
-
-// WebHookCommit represents the commit variant we receive from GitHub in a
-// WebHookPayload.
-type WebHookCommit struct {
- Added []string `json:"added,omitempty"`
- Author *WebHookAuthor `json:"author,omitempty"`
- Committer *WebHookAuthor `json:"committer,omitempty"`
- Distinct *bool `json:"distinct,omitempty"`
- ID *string `json:"id,omitempty"`
- Message *string `json:"message,omitempty"`
- Modified []string `json:"modified,omitempty"`
- Removed []string `json:"removed,omitempty"`
- Timestamp *time.Time `json:"timestamp,omitempty"`
-}
-
-func (w WebHookCommit) String() string {
- return Stringify(w)
-}
-
-// WebHookAuthor represents the author or committer of a commit, as specified
-// in a WebHookCommit. The commit author may not correspond to a GitHub User.
-type WebHookAuthor struct {
- Email *string `json:"email,omitempty"`
- Name *string `json:"name,omitempty"`
- Username *string `json:"username,omitempty"`
-}
-
-func (w WebHookAuthor) String() string {
- return Stringify(w)
-}
-
-// Hook represents a GitHub (web and service) hook for a repository.
-type Hook struct {
- CreatedAt *time.Time `json:"created_at,omitempty"`
- UpdatedAt *time.Time `json:"updated_at,omitempty"`
- Name *string `json:"name,omitempty"`
- URL *string `json:"url,omitempty"`
- Events []string `json:"events,omitempty"`
- Active *bool `json:"active,omitempty"`
- Config map[string]interface{} `json:"config,omitempty"`
- ID *int `json:"id,omitempty"`
-}
-
-func (h Hook) String() string {
- return Stringify(h)
-}
-
-// CreateHook creates a Hook for the specified repository.
-// Name and Config are required fields.
-//
-// GitHub API docs: http://developer.github.com/v3/repos/hooks/#create-a-hook
-func (s *RepositoriesService) CreateHook(owner, repo string, hook *Hook) (*Hook, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/hooks", owner, repo)
- req, err := s.client.NewRequest("POST", u, hook)
- if err != nil {
- return nil, nil, err
- }
-
- h := new(Hook)
- resp, err := s.client.Do(req, h)
- if err != nil {
- return nil, resp, err
- }
-
- return h, resp, err
-}
-
-// ListHooks lists all Hooks for the specified repository.
-//
-// GitHub API docs: http://developer.github.com/v3/repos/hooks/#list
-func (s *RepositoriesService) ListHooks(owner, repo string, opt *ListOptions) ([]*Hook, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/hooks", owner, repo)
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- hooks := new([]*Hook)
- resp, err := s.client.Do(req, hooks)
- if err != nil {
- return nil, resp, err
- }
-
- return *hooks, resp, err
-}
-
-// GetHook returns a single specified Hook.
-//
-// GitHub API docs: http://developer.github.com/v3/repos/hooks/#get-single-hook
-func (s *RepositoriesService) GetHook(owner, repo string, id int) (*Hook, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/hooks/%d", owner, repo, id)
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
- hook := new(Hook)
- resp, err := s.client.Do(req, hook)
- return hook, resp, err
-}
-
-// EditHook updates a specified Hook.
-//
-// GitHub API docs: http://developer.github.com/v3/repos/hooks/#edit-a-hook
-func (s *RepositoriesService) EditHook(owner, repo string, id int, hook *Hook) (*Hook, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/hooks/%d", owner, repo, id)
- req, err := s.client.NewRequest("PATCH", u, hook)
- if err != nil {
- return nil, nil, err
- }
- h := new(Hook)
- resp, err := s.client.Do(req, h)
- return h, resp, err
-}
-
-// DeleteHook deletes a specified Hook.
-//
-// GitHub API docs: http://developer.github.com/v3/repos/hooks/#delete-a-hook
-func (s *RepositoriesService) DeleteHook(owner, repo string, id int) (*Response, error) {
- u := fmt.Sprintf("repos/%v/%v/hooks/%d", owner, repo, id)
- req, err := s.client.NewRequest("DELETE", u, nil)
- if err != nil {
- return nil, err
- }
- return s.client.Do(req, nil)
-}
-
-// PingHook triggers a 'ping' event to be sent to the Hook.
-//
-// GitHub API docs: https://developer.github.com/v3/repos/hooks/#ping-a-hook
-func (s *RepositoriesService) PingHook(owner, repo string, id int) (*Response, error) {
- u := fmt.Sprintf("repos/%v/%v/hooks/%d/pings", owner, repo, id)
- req, err := s.client.NewRequest("POST", u, nil)
- if err != nil {
- return nil, err
- }
- return s.client.Do(req, nil)
-}
-
-// TestHook triggers a test Hook by github.
-//
-// GitHub API docs: http://developer.github.com/v3/repos/hooks/#test-a-push-hook
-func (s *RepositoriesService) TestHook(owner, repo string, id int) (*Response, error) {
- u := fmt.Sprintf("repos/%v/%v/hooks/%d/tests", owner, repo, id)
- req, err := s.client.NewRequest("POST", u, nil)
- if err != nil {
- return nil, err
- }
- return s.client.Do(req, nil)
-}
-
-// ListServiceHooks is deprecated. Use Client.ListServiceHooks instead.
-func (s *RepositoriesService) ListServiceHooks() ([]*ServiceHook, *Response, error) {
- return s.client.ListServiceHooks()
-}
diff --git a/vendor/src/github.com/google/go-github/github/repos_hooks_test.go b/vendor/src/github.com/google/go-github/github/repos_hooks_test.go
deleted file mode 100644
index fcfc9a7..0000000
--- a/vendor/src/github.com/google/go-github/github/repos_hooks_test.go
+++ /dev/null
@@ -1,187 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "encoding/json"
- "fmt"
- "net/http"
- "reflect"
- "testing"
-)
-
-func TestRepositoriesService_CreateHook(t *testing.T) {
- setup()
- defer teardown()
-
- input := &Hook{Name: String("t")}
-
- mux.HandleFunc("/repos/o/r/hooks", func(w http.ResponseWriter, r *http.Request) {
- v := new(Hook)
- json.NewDecoder(r.Body).Decode(v)
-
- testMethod(t, r, "POST")
- if !reflect.DeepEqual(v, input) {
- t.Errorf("Request body = %+v, want %+v", v, input)
- }
-
- fmt.Fprint(w, `{"id":1}`)
- })
-
- hook, _, err := client.Repositories.CreateHook("o", "r", input)
- if err != nil {
- t.Errorf("Repositories.CreateHook returned error: %v", err)
- }
-
- want := &Hook{ID: Int(1)}
- if !reflect.DeepEqual(hook, want) {
- t.Errorf("Repositories.CreateHook returned %+v, want %+v", hook, want)
- }
-}
-
-func TestRepositoriesService_CreateHook_invalidOwner(t *testing.T) {
- _, _, err := client.Repositories.CreateHook("%", "%", nil)
- testURLParseError(t, err)
-}
-
-func TestRepositoriesService_ListHooks(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/hooks", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testFormValues(t, r, values{"page": "2"})
- fmt.Fprint(w, `[{"id":1}, {"id":2}]`)
- })
-
- opt := &ListOptions{Page: 2}
-
- hooks, _, err := client.Repositories.ListHooks("o", "r", opt)
- if err != nil {
- t.Errorf("Repositories.ListHooks returned error: %v", err)
- }
-
- want := []*Hook{{ID: Int(1)}, {ID: Int(2)}}
- if !reflect.DeepEqual(hooks, want) {
- t.Errorf("Repositories.ListHooks returned %+v, want %+v", hooks, want)
- }
-}
-
-func TestRepositoriesService_ListHooks_invalidOwner(t *testing.T) {
- _, _, err := client.Repositories.ListHooks("%", "%", nil)
- testURLParseError(t, err)
-}
-
-func TestRepositoriesService_GetHook(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/hooks/1", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- fmt.Fprint(w, `{"id":1}`)
- })
-
- hook, _, err := client.Repositories.GetHook("o", "r", 1)
- if err != nil {
- t.Errorf("Repositories.GetHook returned error: %v", err)
- }
-
- want := &Hook{ID: Int(1)}
- if !reflect.DeepEqual(hook, want) {
- t.Errorf("Repositories.GetHook returned %+v, want %+v", hook, want)
- }
-}
-
-func TestRepositoriesService_GetHook_invalidOwner(t *testing.T) {
- _, _, err := client.Repositories.GetHook("%", "%", 1)
- testURLParseError(t, err)
-}
-
-func TestRepositoriesService_EditHook(t *testing.T) {
- setup()
- defer teardown()
-
- input := &Hook{Name: String("t")}
-
- mux.HandleFunc("/repos/o/r/hooks/1", func(w http.ResponseWriter, r *http.Request) {
- v := new(Hook)
- json.NewDecoder(r.Body).Decode(v)
-
- testMethod(t, r, "PATCH")
- if !reflect.DeepEqual(v, input) {
- t.Errorf("Request body = %+v, want %+v", v, input)
- }
-
- fmt.Fprint(w, `{"id":1}`)
- })
-
- hook, _, err := client.Repositories.EditHook("o", "r", 1, input)
- if err != nil {
- t.Errorf("Repositories.EditHook returned error: %v", err)
- }
-
- want := &Hook{ID: Int(1)}
- if !reflect.DeepEqual(hook, want) {
- t.Errorf("Repositories.EditHook returned %+v, want %+v", hook, want)
- }
-}
-
-func TestRepositoriesService_EditHook_invalidOwner(t *testing.T) {
- _, _, err := client.Repositories.EditHook("%", "%", 1, nil)
- testURLParseError(t, err)
-}
-
-func TestRepositoriesService_DeleteHook(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/hooks/1", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "DELETE")
- })
-
- _, err := client.Repositories.DeleteHook("o", "r", 1)
- if err != nil {
- t.Errorf("Repositories.DeleteHook returned error: %v", err)
- }
-}
-
-func TestRepositoriesService_DeleteHook_invalidOwner(t *testing.T) {
- _, err := client.Repositories.DeleteHook("%", "%", 1)
- testURLParseError(t, err)
-}
-
-func TestRepositoriesService_PingHook(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/hooks/1/pings", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "POST")
- })
-
- _, err := client.Repositories.PingHook("o", "r", 1)
- if err != nil {
- t.Errorf("Repositories.PingHook returned error: %v", err)
- }
-}
-
-func TestRepositoriesService_TestHook(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/hooks/1/tests", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "POST")
- })
-
- _, err := client.Repositories.TestHook("o", "r", 1)
- if err != nil {
- t.Errorf("Repositories.TestHook returned error: %v", err)
- }
-}
-
-func TestRepositoriesService_TestHook_invalidOwner(t *testing.T) {
- _, err := client.Repositories.TestHook("%", "%", 1)
- testURLParseError(t, err)
-}
diff --git a/vendor/src/github.com/google/go-github/github/repos_invitations.go b/vendor/src/github.com/google/go-github/github/repos_invitations.go
deleted file mode 100644
index f2806d1..0000000
--- a/vendor/src/github.com/google/go-github/github/repos_invitations.go
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright 2016 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import "fmt"
-
-// RepositoryInvitation represents an invitation to collaborate on a repo.
-type RepositoryInvitation struct {
- ID *int `json:"id,omitempty"`
- Repo *Repository `json:"repository,omitempty"`
- Invitee *User `json:"invitee,omitempty"`
- Inviter *User `json:"inviter,omitempty"`
-
- // Permissions represents the permissions that the associated user will have
- // on the repository. Possible values are: "read", "write", "admin".
- Permissions *string `json:"permissions,omitempty"`
- CreatedAt *Timestamp `json:"created_at,omitempty"`
- URL *string `json:"url,omitempty"`
- HTMLURL *string `json:"html_url,omitempty"`
-}
-
-// ListInvitations lists all currently-open repository invitations.
-//
-// GitHub API docs: https://developer.github.com/v3/repos/invitations/#list-invitations-for-a-repository
-func (s *RepositoriesService) ListInvitations(repoID int, opt *ListOptions) ([]*RepositoryInvitation, *Response, error) {
- u := fmt.Sprintf("repositories/%v/invitations", repoID)
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeRepositoryInvitationsPreview)
-
- invites := []*RepositoryInvitation{}
- resp, err := s.client.Do(req, &invites)
- if err != nil {
- return nil, resp, err
- }
-
- return invites, resp, err
-}
-
-// DeleteInvitation deletes a repository invitation.
-//
-// GitHub API docs: https://developer.github.com/v3/repos/invitations/#delete-a-repository-invitation
-func (s *RepositoriesService) DeleteInvitation(repoID, invitationID int) (*Response, error) {
- u := fmt.Sprintf("repositories/%v/invitations/%v", repoID, invitationID)
- req, err := s.client.NewRequest("DELETE", u, nil)
- if err != nil {
- return nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeRepositoryInvitationsPreview)
-
- return s.client.Do(req, nil)
-}
-
-// UpdateInvitation updates the permissions associated with a repository
-// invitation.
-//
-// permissions represents the permissions that the associated user will have
-// on the repository. Possible values are: "read", "write", "admin".
-//
-// GitHub API docs: https://developer.github.com/v3/repos/invitations/#update-a-repository-invitation
-func (s *RepositoriesService) UpdateInvitation(repoID, invitationID int, permissions string) (*RepositoryInvitation, *Response, error) {
- opts := &struct {
- Permissions string `json:"permissions"`
- }{Permissions: permissions}
- u := fmt.Sprintf("repositories/%v/invitations/%v", repoID, invitationID)
- req, err := s.client.NewRequest("PATCH", u, opts)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeRepositoryInvitationsPreview)
-
- invite := &RepositoryInvitation{}
- resp, err := s.client.Do(req, invite)
- return invite, resp, err
-}
diff --git a/vendor/src/github.com/google/go-github/github/repos_invitations_test.go b/vendor/src/github.com/google/go-github/github/repos_invitations_test.go
deleted file mode 100644
index c5688cb..0000000
--- a/vendor/src/github.com/google/go-github/github/repos_invitations_test.go
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright 2016 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "fmt"
- "net/http"
- "reflect"
- "testing"
-)
-
-func TestRepositoriesService_ListInvitations(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repositories/1/invitations", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testHeader(t, r, "Accept", mediaTypeRepositoryInvitationsPreview)
- testFormValues(t, r, values{"page": "2"})
- fmt.Fprintf(w, `[{"id":1}, {"id":2}]`)
- })
-
- opt := &ListOptions{Page: 2}
- got, _, err := client.Repositories.ListInvitations(1, opt)
- if err != nil {
- t.Errorf("Repositories.ListInvitations returned error: %v", err)
- }
-
- want := []*RepositoryInvitation{{ID: Int(1)}, {ID: Int(2)}}
- if !reflect.DeepEqual(got, want) {
- t.Errorf("Repositories.ListInvitations = %+v, want %+v", got, want)
- }
-}
-
-func TestRepositoriesService_DeleteInvitation(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repositories/1/invitations/2", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "DELETE")
- testHeader(t, r, "Accept", mediaTypeRepositoryInvitationsPreview)
- w.WriteHeader(http.StatusNoContent)
- })
-
- _, err := client.Repositories.DeleteInvitation(1, 2)
- if err != nil {
- t.Errorf("Repositories.DeleteInvitation returned error: %v", err)
- }
-}
-
-func TestRepositoriesService_UpdateInvitation(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repositories/1/invitations/2", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "PATCH")
- testHeader(t, r, "Accept", mediaTypeRepositoryInvitationsPreview)
- fmt.Fprintf(w, `{"id":1}`)
- })
-
- got, _, err := client.Repositories.UpdateInvitation(1, 2, "write")
- if err != nil {
- t.Errorf("Repositories.UpdateInvitation returned error: %v", err)
- }
-
- want := &RepositoryInvitation{ID: Int(1)}
- if !reflect.DeepEqual(got, want) {
- t.Errorf("Repositories.UpdateInvitation = %+v, want %+v", got, want)
- }
-}
diff --git a/vendor/src/github.com/google/go-github/github/repos_keys.go b/vendor/src/github.com/google/go-github/github/repos_keys.go
deleted file mode 100644
index 0bb404a..0000000
--- a/vendor/src/github.com/google/go-github/github/repos_keys.go
+++ /dev/null
@@ -1,108 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import "fmt"
-
-// The Key type is defined in users_keys.go
-
-// ListKeys lists the deploy keys for a repository.
-//
-// GitHub API docs: http://developer.github.com/v3/repos/keys/#list
-func (s *RepositoriesService) ListKeys(owner string, repo string, opt *ListOptions) ([]*Key, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/keys", owner, repo)
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- keys := new([]*Key)
- resp, err := s.client.Do(req, keys)
- if err != nil {
- return nil, resp, err
- }
-
- return *keys, resp, err
-}
-
-// GetKey fetches a single deploy key.
-//
-// GitHub API docs: http://developer.github.com/v3/repos/keys/#get
-func (s *RepositoriesService) GetKey(owner string, repo string, id int) (*Key, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/keys/%v", owner, repo, id)
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- key := new(Key)
- resp, err := s.client.Do(req, key)
- if err != nil {
- return nil, resp, err
- }
-
- return key, resp, err
-}
-
-// CreateKey adds a deploy key for a repository.
-//
-// GitHub API docs: http://developer.github.com/v3/repos/keys/#create
-func (s *RepositoriesService) CreateKey(owner string, repo string, key *Key) (*Key, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/keys", owner, repo)
-
- req, err := s.client.NewRequest("POST", u, key)
- if err != nil {
- return nil, nil, err
- }
-
- k := new(Key)
- resp, err := s.client.Do(req, k)
- if err != nil {
- return nil, resp, err
- }
-
- return k, resp, err
-}
-
-// EditKey edits a deploy key.
-//
-// GitHub API docs: http://developer.github.com/v3/repos/keys/#edit
-func (s *RepositoriesService) EditKey(owner string, repo string, id int, key *Key) (*Key, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/keys/%v", owner, repo, id)
-
- req, err := s.client.NewRequest("PATCH", u, key)
- if err != nil {
- return nil, nil, err
- }
-
- k := new(Key)
- resp, err := s.client.Do(req, k)
- if err != nil {
- return nil, resp, err
- }
-
- return k, resp, err
-}
-
-// DeleteKey deletes a deploy key.
-//
-// GitHub API docs: http://developer.github.com/v3/repos/keys/#delete
-func (s *RepositoriesService) DeleteKey(owner string, repo string, id int) (*Response, error) {
- u := fmt.Sprintf("repos/%v/%v/keys/%v", owner, repo, id)
-
- req, err := s.client.NewRequest("DELETE", u, nil)
- if err != nil {
- return nil, err
- }
-
- return s.client.Do(req, nil)
-}
diff --git a/vendor/src/github.com/google/go-github/github/repos_keys_test.go b/vendor/src/github.com/google/go-github/github/repos_keys_test.go
deleted file mode 100644
index 3bea308..0000000
--- a/vendor/src/github.com/google/go-github/github/repos_keys_test.go
+++ /dev/null
@@ -1,153 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "encoding/json"
- "fmt"
- "net/http"
- "reflect"
- "testing"
-)
-
-func TestRepositoriesService_ListKeys(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/keys", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testFormValues(t, r, values{"page": "2"})
- fmt.Fprint(w, `[{"id":1}]`)
- })
-
- opt := &ListOptions{Page: 2}
- keys, _, err := client.Repositories.ListKeys("o", "r", opt)
- if err != nil {
- t.Errorf("Repositories.ListKeys returned error: %v", err)
- }
-
- want := []*Key{{ID: Int(1)}}
- if !reflect.DeepEqual(keys, want) {
- t.Errorf("Repositories.ListKeys returned %+v, want %+v", keys, want)
- }
-}
-
-func TestRepositoriesService_ListKeys_invalidOwner(t *testing.T) {
- _, _, err := client.Repositories.ListKeys("%", "%", nil)
- testURLParseError(t, err)
-}
-
-func TestRepositoriesService_GetKey(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/keys/1", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- fmt.Fprint(w, `{"id":1}`)
- })
-
- key, _, err := client.Repositories.GetKey("o", "r", 1)
- if err != nil {
- t.Errorf("Repositories.GetKey returned error: %v", err)
- }
-
- want := &Key{ID: Int(1)}
- if !reflect.DeepEqual(key, want) {
- t.Errorf("Repositories.GetKey returned %+v, want %+v", key, want)
- }
-}
-
-func TestRepositoriesService_GetKey_invalidOwner(t *testing.T) {
- _, _, err := client.Repositories.GetKey("%", "%", 1)
- testURLParseError(t, err)
-}
-
-func TestRepositoriesService_CreateKey(t *testing.T) {
- setup()
- defer teardown()
-
- input := &Key{Key: String("k"), Title: String("t")}
-
- mux.HandleFunc("/repos/o/r/keys", func(w http.ResponseWriter, r *http.Request) {
- v := new(Key)
- json.NewDecoder(r.Body).Decode(v)
-
- testMethod(t, r, "POST")
- if !reflect.DeepEqual(v, input) {
- t.Errorf("Request body = %+v, want %+v", v, input)
- }
-
- fmt.Fprint(w, `{"id":1}`)
- })
-
- key, _, err := client.Repositories.CreateKey("o", "r", input)
- if err != nil {
- t.Errorf("Repositories.GetKey returned error: %v", err)
- }
-
- want := &Key{ID: Int(1)}
- if !reflect.DeepEqual(key, want) {
- t.Errorf("Repositories.GetKey returned %+v, want %+v", key, want)
- }
-}
-
-func TestRepositoriesService_CreateKey_invalidOwner(t *testing.T) {
- _, _, err := client.Repositories.CreateKey("%", "%", nil)
- testURLParseError(t, err)
-}
-
-func TestRepositoriesService_EditKey(t *testing.T) {
- setup()
- defer teardown()
-
- input := &Key{Key: String("k"), Title: String("t")}
-
- mux.HandleFunc("/repos/o/r/keys/1", func(w http.ResponseWriter, r *http.Request) {
- v := new(Key)
- json.NewDecoder(r.Body).Decode(v)
-
- testMethod(t, r, "PATCH")
- if !reflect.DeepEqual(v, input) {
- t.Errorf("Request body = %+v, want %+v", v, input)
- }
-
- fmt.Fprint(w, `{"id":1}`)
- })
-
- key, _, err := client.Repositories.EditKey("o", "r", 1, input)
- if err != nil {
- t.Errorf("Repositories.EditKey returned error: %v", err)
- }
-
- want := &Key{ID: Int(1)}
- if !reflect.DeepEqual(key, want) {
- t.Errorf("Repositories.EditKey returned %+v, want %+v", key, want)
- }
-}
-
-func TestRepositoriesService_EditKey_invalidOwner(t *testing.T) {
- _, _, err := client.Repositories.EditKey("%", "%", 1, nil)
- testURLParseError(t, err)
-}
-
-func TestRepositoriesService_DeleteKey(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/keys/1", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "DELETE")
- })
-
- _, err := client.Repositories.DeleteKey("o", "r", 1)
- if err != nil {
- t.Errorf("Repositories.DeleteKey returned error: %v", err)
- }
-}
-
-func TestRepositoriesService_DeleteKey_invalidOwner(t *testing.T) {
- _, err := client.Repositories.DeleteKey("%", "%", 1)
- testURLParseError(t, err)
-}
diff --git a/vendor/src/github.com/google/go-github/github/repos_merging.go b/vendor/src/github.com/google/go-github/github/repos_merging.go
deleted file mode 100644
index 31f8313..0000000
--- a/vendor/src/github.com/google/go-github/github/repos_merging.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2014 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "fmt"
-)
-
-// RepositoryMergeRequest represents a request to merge a branch in a
-// repository.
-type RepositoryMergeRequest struct {
- Base *string `json:"base,omitempty"`
- Head *string `json:"head,omitempty"`
- CommitMessage *string `json:"commit_message,omitempty"`
-}
-
-// Merge a branch in the specified repository.
-//
-// GitHub API docs: https://developer.github.com/v3/repos/merging/#perform-a-merge
-func (s *RepositoriesService) Merge(owner, repo string, request *RepositoryMergeRequest) (*RepositoryCommit, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/merges", owner, repo)
- req, err := s.client.NewRequest("POST", u, request)
- if err != nil {
- return nil, nil, err
- }
-
- commit := new(RepositoryCommit)
- resp, err := s.client.Do(req, commit)
- if err != nil {
- return nil, resp, err
- }
-
- return commit, resp, err
-}
diff --git a/vendor/src/github.com/google/go-github/github/repos_merging_test.go b/vendor/src/github.com/google/go-github/github/repos_merging_test.go
deleted file mode 100644
index 166c5e5..0000000
--- a/vendor/src/github.com/google/go-github/github/repos_merging_test.go
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2014 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "encoding/json"
- "fmt"
- "net/http"
- "reflect"
- "testing"
-)
-
-func TestRepositoriesService_Merge(t *testing.T) {
- setup()
- defer teardown()
-
- input := &RepositoryMergeRequest{
- Base: String("b"),
- Head: String("h"),
- CommitMessage: String("c"),
- }
-
- mux.HandleFunc("/repos/o/r/merges", func(w http.ResponseWriter, r *http.Request) {
- v := new(RepositoryMergeRequest)
- json.NewDecoder(r.Body).Decode(v)
-
- testMethod(t, r, "POST")
- if !reflect.DeepEqual(v, input) {
- t.Errorf("Request body = %+v, want %+v", v, input)
- }
-
- fmt.Fprint(w, `{"sha":"s"}`)
- })
-
- commit, _, err := client.Repositories.Merge("o", "r", input)
- if err != nil {
- t.Errorf("Repositories.Merge returned error: %v", err)
- }
-
- want := &RepositoryCommit{SHA: String("s")}
- if !reflect.DeepEqual(commit, want) {
- t.Errorf("Repositories.Merge returned %+v, want %+v", commit, want)
- }
-}
diff --git a/vendor/src/github.com/google/go-github/github/repos_pages.go b/vendor/src/github.com/google/go-github/github/repos_pages.go
deleted file mode 100644
index ccd24f3..0000000
--- a/vendor/src/github.com/google/go-github/github/repos_pages.go
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright 2014 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import "fmt"
-
-// Pages represents a GitHub Pages site configuration.
-type Pages struct {
- URL *string `json:"url,omitempty"`
- Status *string `json:"status,omitempty"`
- CNAME *string `json:"cname,omitempty"`
- Custom404 *bool `json:"custom_404,omitempty"`
- HTMLURL *string `json:"html_url,omitempty"`
-}
-
-// PagesError represents a build error for a GitHub Pages site.
-type PagesError struct {
- Message *string `json:"message,omitempty"`
-}
-
-// PagesBuild represents the build information for a GitHub Pages site.
-type PagesBuild struct {
- URL *string `json:"url,omitempty"`
- Status *string `json:"status,omitempty"`
- Error *PagesError `json:"error,omitempty"`
- Pusher *User `json:"pusher,omitempty"`
- Commit *string `json:"commit,omitempty"`
- Duration *int `json:"duration,omitempty"`
- CreatedAt *Timestamp `json:"created_at,omitempty"`
- UpdatedAt *Timestamp `json:"created_at,omitempty"`
-}
-
-// GetPagesInfo fetches information about a GitHub Pages site.
-//
-// GitHub API docs: https://developer.github.com/v3/repos/pages/#get-information-about-a-pages-site
-func (s *RepositoriesService) GetPagesInfo(owner string, repo string) (*Pages, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/pages", owner, repo)
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypePagesPreview)
-
- site := new(Pages)
- resp, err := s.client.Do(req, site)
- if err != nil {
- return nil, resp, err
- }
-
- return site, resp, err
-}
-
-// ListPagesBuilds lists the builds for a GitHub Pages site.
-//
-// GitHub API docs: https://developer.github.com/v3/repos/pages/#list-pages-builds
-func (s *RepositoriesService) ListPagesBuilds(owner string, repo string) ([]*PagesBuild, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/pages/builds", owner, repo)
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- var pages []*PagesBuild
- resp, err := s.client.Do(req, &pages)
- if err != nil {
- return nil, resp, err
- }
-
- return pages, resp, err
-}
-
-// GetLatestPagesBuild fetches the latest build information for a GitHub pages site.
-//
-// GitHub API docs: https://developer.github.com/v3/repos/pages/#list-latest-pages-build
-func (s *RepositoriesService) GetLatestPagesBuild(owner string, repo string) (*PagesBuild, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/pages/builds/latest", owner, repo)
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- build := new(PagesBuild)
- resp, err := s.client.Do(req, build)
- if err != nil {
- return nil, resp, err
- }
-
- return build, resp, err
-}
-
-// RequestPageBuild requests a build of a GitHub Pages site without needing to push new commit.
-//
-// GitHub API docs: https://developer.github.com/v3/repos/pages/#request-a-page-build
-func (s *RepositoriesService) RequestPageBuild(owner string, repo string) (*PagesBuild, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/pages/builds", owner, repo)
- req, err := s.client.NewRequest("POST", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypePagesPreview)
-
- build := new(PagesBuild)
- resp, err := s.client.Do(req, build)
- if err != nil {
- return nil, resp, err
- }
-
- return build, resp, err
-}
diff --git a/vendor/src/github.com/google/go-github/github/repos_pages_test.go b/vendor/src/github.com/google/go-github/github/repos_pages_test.go
deleted file mode 100644
index 830d77f..0000000
--- a/vendor/src/github.com/google/go-github/github/repos_pages_test.go
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright 2014 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "fmt"
- "net/http"
- "reflect"
- "testing"
-)
-
-func TestRepositoriesService_GetPagesInfo(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/pages", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testHeader(t, r, "Accept", mediaTypePagesPreview)
- fmt.Fprint(w, `{"url":"u","status":"s","cname":"c","custom_404":false,"html_url":"h"}`)
- })
-
- page, _, err := client.Repositories.GetPagesInfo("o", "r")
- if err != nil {
- t.Errorf("Repositories.GetPagesInfo returned error: %v", err)
- }
-
- want := &Pages{URL: String("u"), Status: String("s"), CNAME: String("c"), Custom404: Bool(false), HTMLURL: String("h")}
- if !reflect.DeepEqual(page, want) {
- t.Errorf("Repositories.GetPagesInfo returned %+v, want %+v", page, want)
- }
-}
-
-func TestRepositoriesService_ListPagesBuilds(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/pages/builds", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- fmt.Fprint(w, `[{"url":"u","status":"s","commit":"c"}]`)
- })
-
- pages, _, err := client.Repositories.ListPagesBuilds("o", "r")
- if err != nil {
- t.Errorf("Repositories.ListPagesBuilds returned error: %v", err)
- }
-
- want := []*PagesBuild{{URL: String("u"), Status: String("s"), Commit: String("c")}}
- if !reflect.DeepEqual(pages, want) {
- t.Errorf("Repositories.ListPagesBuilds returned %+v, want %+v", pages, want)
- }
-}
-
-func TestRepositoriesService_GetLatestPagesBuild(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/pages/builds/latest", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- fmt.Fprint(w, `{"url":"u","status":"s","commit":"c"}`)
- })
-
- build, _, err := client.Repositories.GetLatestPagesBuild("o", "r")
- if err != nil {
- t.Errorf("Repositories.GetLatestPagesBuild returned error: %v", err)
- }
-
- want := &PagesBuild{URL: String("u"), Status: String("s"), Commit: String("c")}
- if !reflect.DeepEqual(build, want) {
- t.Errorf("Repositories.GetLatestPagesBuild returned %+v, want %+v", build, want)
- }
-}
-
-func TestRepositoriesService_RequestPageBuild(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/pages/builds", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "POST")
- testHeader(t, r, "Accept", mediaTypePagesPreview)
- fmt.Fprint(w, `{"url":"u","status":"s"}`)
- })
-
- build, _, err := client.Repositories.RequestPageBuild("o", "r")
- if err != nil {
- t.Errorf("Repositories.RequestPageBuild returned error: %v", err)
- }
-
- want := &PagesBuild{URL: String("u"), Status: String("s")}
- if !reflect.DeepEqual(build, want) {
- t.Errorf("Repositories.RequestPageBuild returned %+v, want %+v", build, want)
- }
-}
diff --git a/vendor/src/github.com/google/go-github/github/repos_releases.go b/vendor/src/github.com/google/go-github/github/repos_releases.go
deleted file mode 100644
index e889b0d..0000000
--- a/vendor/src/github.com/google/go-github/github/repos_releases.go
+++ /dev/null
@@ -1,325 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "errors"
- "fmt"
- "io"
- "mime"
- "net/http"
- "os"
- "path/filepath"
- "strings"
-)
-
-// RepositoryRelease represents a GitHub release in a repository.
-type RepositoryRelease struct {
- ID *int `json:"id,omitempty"`
- TagName *string `json:"tag_name,omitempty"`
- TargetCommitish *string `json:"target_commitish,omitempty"`
- Name *string `json:"name,omitempty"`
- Body *string `json:"body,omitempty"`
- Draft *bool `json:"draft,omitempty"`
- Prerelease *bool `json:"prerelease,omitempty"`
- CreatedAt *Timestamp `json:"created_at,omitempty"`
- PublishedAt *Timestamp `json:"published_at,omitempty"`
- URL *string `json:"url,omitempty"`
- HTMLURL *string `json:"html_url,omitempty"`
- AssetsURL *string `json:"assets_url,omitempty"`
- Assets []ReleaseAsset `json:"assets,omitempty"`
- UploadURL *string `json:"upload_url,omitempty"`
- ZipballURL *string `json:"zipball_url,omitempty"`
- TarballURL *string `json:"tarball_url,omitempty"`
- Author *CommitAuthor `json:"author,omitempty"`
-}
-
-func (r RepositoryRelease) String() string {
- return Stringify(r)
-}
-
-// ReleaseAsset represents a Github release asset in a repository.
-type ReleaseAsset struct {
- ID *int `json:"id,omitempty"`
- URL *string `json:"url,omitempty"`
- Name *string `json:"name,omitempty"`
- Label *string `json:"label,omitempty"`
- State *string `json:"state,omitempty"`
- ContentType *string `json:"content_type,omitempty"`
- Size *int `json:"size,omitempty"`
- DownloadCount *int `json:"download_count,omitempty"`
- CreatedAt *Timestamp `json:"created_at,omitempty"`
- UpdatedAt *Timestamp `json:"updated_at,omitempty"`
- BrowserDownloadURL *string `json:"browser_download_url,omitempty"`
- Uploader *User `json:"uploader,omitempty"`
-}
-
-func (r ReleaseAsset) String() string {
- return Stringify(r)
-}
-
-// ListReleases lists the releases for a repository.
-//
-// GitHub API docs: http://developer.github.com/v3/repos/releases/#list-releases-for-a-repository
-func (s *RepositoriesService) ListReleases(owner, repo string, opt *ListOptions) ([]*RepositoryRelease, *Response, error) {
- u := fmt.Sprintf("repos/%s/%s/releases", owner, repo)
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- releases := new([]*RepositoryRelease)
- resp, err := s.client.Do(req, releases)
- if err != nil {
- return nil, resp, err
- }
- return *releases, resp, err
-}
-
-// GetRelease fetches a single release.
-//
-// GitHub API docs: http://developer.github.com/v3/repos/releases/#get-a-single-release
-func (s *RepositoriesService) GetRelease(owner, repo string, id int) (*RepositoryRelease, *Response, error) {
- u := fmt.Sprintf("repos/%s/%s/releases/%d", owner, repo, id)
- return s.getSingleRelease(u)
-}
-
-// GetLatestRelease fetches the latest published release for the repository.
-//
-// GitHub API docs: https://developer.github.com/v3/repos/releases/#get-the-latest-release
-func (s *RepositoriesService) GetLatestRelease(owner, repo string) (*RepositoryRelease, *Response, error) {
- u := fmt.Sprintf("repos/%s/%s/releases/latest", owner, repo)
- return s.getSingleRelease(u)
-}
-
-// GetReleaseByTag fetches a release with the specified tag.
-//
-// GitHub API docs: https://developer.github.com/v3/repos/releases/#get-a-release-by-tag-name
-func (s *RepositoriesService) GetReleaseByTag(owner, repo, tag string) (*RepositoryRelease, *Response, error) {
- u := fmt.Sprintf("repos/%s/%s/releases/tags/%s", owner, repo, tag)
- return s.getSingleRelease(u)
-}
-
-func (s *RepositoriesService) getSingleRelease(url string) (*RepositoryRelease, *Response, error) {
- req, err := s.client.NewRequest("GET", url, nil)
- if err != nil {
- return nil, nil, err
- }
-
- release := new(RepositoryRelease)
- resp, err := s.client.Do(req, release)
- if err != nil {
- return nil, resp, err
- }
- return release, resp, err
-}
-
-// CreateRelease adds a new release for a repository.
-//
-// GitHub API docs : http://developer.github.com/v3/repos/releases/#create-a-release
-func (s *RepositoriesService) CreateRelease(owner, repo string, release *RepositoryRelease) (*RepositoryRelease, *Response, error) {
- u := fmt.Sprintf("repos/%s/%s/releases", owner, repo)
-
- req, err := s.client.NewRequest("POST", u, release)
- if err != nil {
- return nil, nil, err
- }
-
- r := new(RepositoryRelease)
- resp, err := s.client.Do(req, r)
- if err != nil {
- return nil, resp, err
- }
- return r, resp, err
-}
-
-// EditRelease edits a repository release.
-//
-// GitHub API docs : http://developer.github.com/v3/repos/releases/#edit-a-release
-func (s *RepositoriesService) EditRelease(owner, repo string, id int, release *RepositoryRelease) (*RepositoryRelease, *Response, error) {
- u := fmt.Sprintf("repos/%s/%s/releases/%d", owner, repo, id)
-
- req, err := s.client.NewRequest("PATCH", u, release)
- if err != nil {
- return nil, nil, err
- }
-
- r := new(RepositoryRelease)
- resp, err := s.client.Do(req, r)
- if err != nil {
- return nil, resp, err
- }
- return r, resp, err
-}
-
-// DeleteRelease delete a single release from a repository.
-//
-// GitHub API docs : http://developer.github.com/v3/repos/releases/#delete-a-release
-func (s *RepositoriesService) DeleteRelease(owner, repo string, id int) (*Response, error) {
- u := fmt.Sprintf("repos/%s/%s/releases/%d", owner, repo, id)
-
- req, err := s.client.NewRequest("DELETE", u, nil)
- if err != nil {
- return nil, err
- }
- return s.client.Do(req, nil)
-}
-
-// ListReleaseAssets lists the release's assets.
-//
-// GitHub API docs : http://developer.github.com/v3/repos/releases/#list-assets-for-a-release
-func (s *RepositoriesService) ListReleaseAssets(owner, repo string, id int, opt *ListOptions) ([]*ReleaseAsset, *Response, error) {
- u := fmt.Sprintf("repos/%s/%s/releases/%d/assets", owner, repo, id)
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- assets := new([]*ReleaseAsset)
- resp, err := s.client.Do(req, assets)
- if err != nil {
- return nil, resp, nil
- }
- return *assets, resp, err
-}
-
-// GetReleaseAsset fetches a single release asset.
-//
-// GitHub API docs : http://developer.github.com/v3/repos/releases/#get-a-single-release-asset
-func (s *RepositoriesService) GetReleaseAsset(owner, repo string, id int) (*ReleaseAsset, *Response, error) {
- u := fmt.Sprintf("repos/%s/%s/releases/assets/%d", owner, repo, id)
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- asset := new(ReleaseAsset)
- resp, err := s.client.Do(req, asset)
- if err != nil {
- return nil, resp, nil
- }
- return asset, resp, err
-}
-
-// DownloadReleaseAsset downloads a release asset or returns a redirect URL.
-//
-// DownloadReleaseAsset returns an io.ReadCloser that reads the contents of the
-// specified release asset. It is the caller's responsibility to close the ReadCloser.
-// If a redirect is returned, the redirect URL will be returned as a string instead
-// of the io.ReadCloser. Exactly one of rc and redirectURL will be zero.
-//
-// GitHub API docs : http://developer.github.com/v3/repos/releases/#get-a-single-release-asset
-func (s *RepositoriesService) DownloadReleaseAsset(owner, repo string, id int) (rc io.ReadCloser, redirectURL string, err error) {
- u := fmt.Sprintf("repos/%s/%s/releases/assets/%d", owner, repo, id)
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, "", err
- }
- req.Header.Set("Accept", defaultMediaType)
-
- s.client.clientMu.Lock()
- defer s.client.clientMu.Unlock()
-
- var loc string
- saveRedirect := s.client.client.CheckRedirect
- s.client.client.CheckRedirect = func(req *http.Request, via []*http.Request) error {
- loc = req.URL.String()
- return errors.New("disable redirect")
- }
- defer func() { s.client.client.CheckRedirect = saveRedirect }()
-
- resp, err := s.client.client.Do(req)
- if err != nil {
- if !strings.Contains(err.Error(), "disable redirect") {
- return nil, "", err
- }
- return nil, loc, nil
- }
-
- if err := CheckResponse(resp); err != nil {
- resp.Body.Close()
- return nil, "", err
- }
-
- return resp.Body, "", nil
-}
-
-// EditReleaseAsset edits a repository release asset.
-//
-// GitHub API docs : http://developer.github.com/v3/repos/releases/#edit-a-release-asset
-func (s *RepositoriesService) EditReleaseAsset(owner, repo string, id int, release *ReleaseAsset) (*ReleaseAsset, *Response, error) {
- u := fmt.Sprintf("repos/%s/%s/releases/assets/%d", owner, repo, id)
-
- req, err := s.client.NewRequest("PATCH", u, release)
- if err != nil {
- return nil, nil, err
- }
-
- asset := new(ReleaseAsset)
- resp, err := s.client.Do(req, asset)
- if err != nil {
- return nil, resp, err
- }
- return asset, resp, err
-}
-
-// DeleteReleaseAsset delete a single release asset from a repository.
-//
-// GitHub API docs : http://developer.github.com/v3/repos/releases/#delete-a-release-asset
-func (s *RepositoriesService) DeleteReleaseAsset(owner, repo string, id int) (*Response, error) {
- u := fmt.Sprintf("repos/%s/%s/releases/assets/%d", owner, repo, id)
-
- req, err := s.client.NewRequest("DELETE", u, nil)
- if err != nil {
- return nil, err
- }
- return s.client.Do(req, nil)
-}
-
-// UploadReleaseAsset creates an asset by uploading a file into a release repository.
-// To upload assets that cannot be represented by an os.File, call NewUploadRequest directly.
-//
-// GitHub API docs : http://developer.github.com/v3/repos/releases/#upload-a-release-asset
-func (s *RepositoriesService) UploadReleaseAsset(owner, repo string, id int, opt *UploadOptions, file *os.File) (*ReleaseAsset, *Response, error) {
- u := fmt.Sprintf("repos/%s/%s/releases/%d/assets", owner, repo, id)
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- stat, err := file.Stat()
- if err != nil {
- return nil, nil, err
- }
- if stat.IsDir() {
- return nil, nil, errors.New("the asset to upload can't be a directory")
- }
-
- mediaType := mime.TypeByExtension(filepath.Ext(file.Name()))
- req, err := s.client.NewUploadRequest(u, file, stat.Size(), mediaType)
- if err != nil {
- return nil, nil, err
- }
-
- asset := new(ReleaseAsset)
- resp, err := s.client.Do(req, asset)
- if err != nil {
- return nil, resp, err
- }
- return asset, resp, err
-}
diff --git a/vendor/src/github.com/google/go-github/github/repos_releases_test.go b/vendor/src/github.com/google/go-github/github/repos_releases_test.go
deleted file mode 100644
index 412b245..0000000
--- a/vendor/src/github.com/google/go-github/github/repos_releases_test.go
+++ /dev/null
@@ -1,352 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "bytes"
- "encoding/json"
- "fmt"
- "io/ioutil"
- "net/http"
- "os"
- "reflect"
- "strings"
- "testing"
-)
-
-func TestRepositoriesService_ListReleases(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/releases", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testFormValues(t, r, values{"page": "2"})
- fmt.Fprint(w, `[{"id":1}]`)
- })
-
- opt := &ListOptions{Page: 2}
- releases, _, err := client.Repositories.ListReleases("o", "r", opt)
- if err != nil {
- t.Errorf("Repositories.ListReleases returned error: %v", err)
- }
- want := []*RepositoryRelease{{ID: Int(1)}}
- if !reflect.DeepEqual(releases, want) {
- t.Errorf("Repositories.ListReleases returned %+v, want %+v", releases, want)
- }
-}
-
-func TestRepositoriesService_GetRelease(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/releases/1", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- fmt.Fprint(w, `{"id":1}`)
- })
-
- release, resp, err := client.Repositories.GetRelease("o", "r", 1)
- if err != nil {
- t.Errorf("Repositories.GetRelease returned error: %v\n%v", err, resp.Body)
- }
-
- want := &RepositoryRelease{ID: Int(1)}
- if !reflect.DeepEqual(release, want) {
- t.Errorf("Repositories.GetRelease returned %+v, want %+v", release, want)
- }
-}
-
-func TestRepositoriesService_GetLatestRelease(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/releases/latest", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- fmt.Fprint(w, `{"id":3}`)
- })
-
- release, resp, err := client.Repositories.GetLatestRelease("o", "r")
- if err != nil {
- t.Errorf("Repositories.GetLatestRelease returned error: %v\n%v", err, resp.Body)
- }
-
- want := &RepositoryRelease{ID: Int(3)}
- if !reflect.DeepEqual(release, want) {
- t.Errorf("Repositories.GetLatestRelease returned %+v, want %+v", release, want)
- }
-}
-
-func TestRepositoriesService_GetReleaseByTag(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/releases/tags/foo", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- fmt.Fprint(w, `{"id":13}`)
- })
-
- release, resp, err := client.Repositories.GetReleaseByTag("o", "r", "foo")
- if err != nil {
- t.Errorf("Repositories.GetReleaseByTag returned error: %v\n%v", err, resp.Body)
- }
-
- want := &RepositoryRelease{ID: Int(13)}
- if !reflect.DeepEqual(release, want) {
- t.Errorf("Repositories.GetReleaseByTag returned %+v, want %+v", release, want)
- }
-}
-
-func TestRepositoriesService_CreateRelease(t *testing.T) {
- setup()
- defer teardown()
-
- input := &RepositoryRelease{Name: String("v1.0")}
-
- mux.HandleFunc("/repos/o/r/releases", func(w http.ResponseWriter, r *http.Request) {
- v := new(RepositoryRelease)
- json.NewDecoder(r.Body).Decode(v)
-
- testMethod(t, r, "POST")
- if !reflect.DeepEqual(v, input) {
- t.Errorf("Request body = %+v, want %+v", v, input)
- }
- fmt.Fprint(w, `{"id":1}`)
- })
-
- release, _, err := client.Repositories.CreateRelease("o", "r", input)
- if err != nil {
- t.Errorf("Repositories.CreateRelease returned error: %v", err)
- }
-
- want := &RepositoryRelease{ID: Int(1)}
- if !reflect.DeepEqual(release, want) {
- t.Errorf("Repositories.CreateRelease returned %+v, want %+v", release, want)
- }
-}
-
-func TestRepositoriesService_EditRelease(t *testing.T) {
- setup()
- defer teardown()
-
- input := &RepositoryRelease{Name: String("n")}
-
- mux.HandleFunc("/repos/o/r/releases/1", func(w http.ResponseWriter, r *http.Request) {
- v := new(RepositoryRelease)
- json.NewDecoder(r.Body).Decode(v)
-
- testMethod(t, r, "PATCH")
- if !reflect.DeepEqual(v, input) {
- t.Errorf("Request body = %+v, want %+v", v, input)
- }
- fmt.Fprint(w, `{"id":1}`)
- })
-
- release, _, err := client.Repositories.EditRelease("o", "r", 1, input)
- if err != nil {
- t.Errorf("Repositories.EditRelease returned error: %v", err)
- }
- want := &RepositoryRelease{ID: Int(1)}
- if !reflect.DeepEqual(release, want) {
- t.Errorf("Repositories.EditRelease returned = %+v, want %+v", release, want)
- }
-}
-
-func TestRepositoriesService_DeleteRelease(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/releases/1", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "DELETE")
- })
-
- _, err := client.Repositories.DeleteRelease("o", "r", 1)
- if err != nil {
- t.Errorf("Repositories.DeleteRelease returned error: %v", err)
- }
-}
-
-func TestRepositoriesService_ListReleaseAssets(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/releases/1/assets", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testFormValues(t, r, values{"page": "2"})
- fmt.Fprint(w, `[{"id":1}]`)
- })
-
- opt := &ListOptions{Page: 2}
- assets, _, err := client.Repositories.ListReleaseAssets("o", "r", 1, opt)
- if err != nil {
- t.Errorf("Repositories.ListReleaseAssets returned error: %v", err)
- }
- want := []*ReleaseAsset{{ID: Int(1)}}
- if !reflect.DeepEqual(assets, want) {
- t.Errorf("Repositories.ListReleaseAssets returned %+v, want %+v", assets, want)
- }
-}
-
-func TestRepositoriesService_GetReleaseAsset(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/releases/assets/1", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- fmt.Fprint(w, `{"id":1}`)
- })
-
- asset, _, err := client.Repositories.GetReleaseAsset("o", "r", 1)
- if err != nil {
- t.Errorf("Repositories.GetReleaseAsset returned error: %v", err)
- }
- want := &ReleaseAsset{ID: Int(1)}
- if !reflect.DeepEqual(asset, want) {
- t.Errorf("Repositories.GetReleaseAsset returned %+v, want %+v", asset, want)
- }
-}
-
-func TestRepositoriesService_DownloadReleaseAsset_Stream(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/releases/assets/1", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testHeader(t, r, "Accept", defaultMediaType)
- w.Header().Set("Content-Type", "application/octet-stream")
- w.Header().Set("Content-Disposition", "attachment; filename=hello-world.txt")
- fmt.Fprint(w, "Hello World")
- })
-
- reader, _, err := client.Repositories.DownloadReleaseAsset("o", "r", 1)
- if err != nil {
- t.Errorf("Repositories.DownloadReleaseAsset returned error: %v", err)
- }
- want := []byte("Hello World")
- content, err := ioutil.ReadAll(reader)
- if err != nil {
- t.Errorf("Repositories.DownloadReleaseAsset returned bad reader: %v", err)
- }
- if !bytes.Equal(want, content) {
- t.Errorf("Repositories.DownloadReleaseAsset returned %+v, want %+v", content, want)
- }
-}
-
-func TestRepositoriesService_DownloadReleaseAsset_Redirect(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/releases/assets/1", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testHeader(t, r, "Accept", defaultMediaType)
- http.Redirect(w, r, "/yo", http.StatusFound)
- })
-
- _, got, err := client.Repositories.DownloadReleaseAsset("o", "r", 1)
- if err != nil {
- t.Errorf("Repositories.DownloadReleaseAsset returned error: %v", err)
- }
- want := "/yo"
- if !strings.HasSuffix(got, want) {
- t.Errorf("Repositories.DownloadReleaseAsset returned %+v, want %+v", got, want)
- }
-}
-
-func TestRepositoriesService_DownloadReleaseAsset_APIError(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/releases/assets/1", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testHeader(t, r, "Accept", defaultMediaType)
- w.WriteHeader(http.StatusNotFound)
- fmt.Fprint(w, `{"message":"Not Found","documentation_url":"https://developer.github.com/v3"}`)
- })
-
- resp, loc, err := client.Repositories.DownloadReleaseAsset("o", "r", 1)
- if err == nil {
- t.Error("Repositories.DownloadReleaseAsset did not return an error")
- }
-
- if resp != nil {
- resp.Close()
- t.Error("Repositories.DownloadReleaseAsset returned stream, want nil")
- }
-
- if loc != "" {
- t.Errorf(`Repositories.DownloadReleaseAsset returned "%s", want empty ""`, loc)
- }
-}
-
-func TestRepositoriesService_EditReleaseAsset(t *testing.T) {
- setup()
- defer teardown()
-
- input := &ReleaseAsset{Name: String("n")}
-
- mux.HandleFunc("/repos/o/r/releases/assets/1", func(w http.ResponseWriter, r *http.Request) {
- v := new(ReleaseAsset)
- json.NewDecoder(r.Body).Decode(v)
-
- testMethod(t, r, "PATCH")
- if !reflect.DeepEqual(v, input) {
- t.Errorf("Request body = %+v, want %+v", v, input)
- }
- fmt.Fprint(w, `{"id":1}`)
- })
-
- asset, _, err := client.Repositories.EditReleaseAsset("o", "r", 1, input)
- if err != nil {
- t.Errorf("Repositories.EditReleaseAsset returned error: %v", err)
- }
- want := &ReleaseAsset{ID: Int(1)}
- if !reflect.DeepEqual(asset, want) {
- t.Errorf("Repositories.EditReleaseAsset returned = %+v, want %+v", asset, want)
- }
-}
-
-func TestRepositoriesService_DeleteReleaseAsset(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/releases/assets/1", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "DELETE")
- })
-
- _, err := client.Repositories.DeleteReleaseAsset("o", "r", 1)
- if err != nil {
- t.Errorf("Repositories.DeleteReleaseAsset returned error: %v", err)
- }
-}
-
-func TestRepositoriesService_UploadReleaseAsset(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/releases/1/assets", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "POST")
- testHeader(t, r, "Content-Type", "text/plain; charset=utf-8")
- testHeader(t, r, "Content-Length", "12")
- testFormValues(t, r, values{"name": "n"})
- testBody(t, r, "Upload me !\n")
-
- fmt.Fprintf(w, `{"id":1}`)
- })
-
- file, dir, err := openTestFile("upload.txt", "Upload me !\n")
- if err != nil {
- t.Fatalf("Unable to create temp file: %v", err)
- }
- defer os.RemoveAll(dir)
-
- opt := &UploadOptions{Name: "n"}
- asset, _, err := client.Repositories.UploadReleaseAsset("o", "r", 1, opt, file)
- if err != nil {
- t.Errorf("Repositories.UploadReleaseAssert returned error: %v", err)
- }
- want := &ReleaseAsset{ID: Int(1)}
- if !reflect.DeepEqual(asset, want) {
- t.Errorf("Repositories.UploadReleaseAssert returned %+v, want %+v", asset, want)
- }
-}
diff --git a/vendor/src/github.com/google/go-github/github/repos_stats.go b/vendor/src/github.com/google/go-github/github/repos_stats.go
deleted file mode 100644
index e4f75a5..0000000
--- a/vendor/src/github.com/google/go-github/github/repos_stats.go
+++ /dev/null
@@ -1,214 +0,0 @@
-// Copyright 2014 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "fmt"
- "time"
-)
-
-// ContributorStats represents a contributor to a repository and their
-// weekly contributions to a given repo.
-type ContributorStats struct {
- Author *Contributor `json:"author,omitempty"`
- Total *int `json:"total,omitempty"`
- Weeks []WeeklyStats `json:"weeks,omitempty"`
-}
-
-func (c ContributorStats) String() string {
- return Stringify(c)
-}
-
-// WeeklyStats represents the number of additions, deletions and commits
-// a Contributor made in a given week.
-type WeeklyStats struct {
- Week *Timestamp `json:"w,omitempty"`
- Additions *int `json:"a,omitempty"`
- Deletions *int `json:"d,omitempty"`
- Commits *int `json:"c,omitempty"`
-}
-
-func (w WeeklyStats) String() string {
- return Stringify(w)
-}
-
-// ListContributorsStats gets a repo's contributor list with additions,
-// deletions and commit counts.
-//
-// If this is the first time these statistics are requested for the given
-// repository, this method will return a non-nil error and a status code of
-// 202. This is because this is the status that github returns to signify that
-// it is now computing the requested statistics. A follow up request, after a
-// delay of a second or so, should result in a successful request.
-//
-// GitHub API Docs: https://developer.github.com/v3/repos/statistics/#contributors
-func (s *RepositoriesService) ListContributorsStats(owner, repo string) ([]*ContributorStats, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/stats/contributors", owner, repo)
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- var contributorStats []*ContributorStats
- resp, err := s.client.Do(req, &contributorStats)
- if err != nil {
- return nil, resp, err
- }
-
- return contributorStats, resp, err
-}
-
-// WeeklyCommitActivity represents the weekly commit activity for a repository.
-// The days array is a group of commits per day, starting on Sunday.
-type WeeklyCommitActivity struct {
- Days []int `json:"days,omitempty"`
- Total *int `json:"total,omitempty"`
- Week *Timestamp `json:"week,omitempty"`
-}
-
-func (w WeeklyCommitActivity) String() string {
- return Stringify(w)
-}
-
-// ListCommitActivity returns the last year of commit activity
-// grouped by week. The days array is a group of commits per day,
-// starting on Sunday.
-//
-// If this is the first time these statistics are requested for the given
-// repository, this method will return a non-nil error and a status code of
-// 202. This is because this is the status that github returns to signify that
-// it is now computing the requested statistics. A follow up request, after a
-// delay of a second or so, should result in a successful request.
-//
-// GitHub API Docs: https://developer.github.com/v3/repos/statistics/#commit-activity
-func (s *RepositoriesService) ListCommitActivity(owner, repo string) ([]*WeeklyCommitActivity, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/stats/commit_activity", owner, repo)
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- var weeklyCommitActivity []*WeeklyCommitActivity
- resp, err := s.client.Do(req, &weeklyCommitActivity)
- if err != nil {
- return nil, resp, err
- }
-
- return weeklyCommitActivity, resp, err
-}
-
-// ListCodeFrequency returns a weekly aggregate of the number of additions and
-// deletions pushed to a repository. Returned WeeklyStats will contain
-// additions and deletions, but not total commits.
-//
-// GitHub API Docs: https://developer.github.com/v3/repos/statistics/#code-frequency
-func (s *RepositoriesService) ListCodeFrequency(owner, repo string) ([]*WeeklyStats, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/stats/code_frequency", owner, repo)
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- var weeks [][]int
- resp, err := s.client.Do(req, &weeks)
-
- // convert int slices into WeeklyStats
- var stats []*WeeklyStats
- for _, week := range weeks {
- if len(week) != 3 {
- continue
- }
- stat := &WeeklyStats{
- Week: &Timestamp{time.Unix(int64(week[0]), 0)},
- Additions: Int(week[1]),
- Deletions: Int(week[2]),
- }
- stats = append(stats, stat)
- }
-
- return stats, resp, err
-}
-
-// RepositoryParticipation is the number of commits by everyone
-// who has contributed to the repository (including the owner)
-// as well as the number of commits by the owner themself.
-type RepositoryParticipation struct {
- All []int `json:"all,omitempty"`
- Owner []int `json:"owner,omitempty"`
-}
-
-func (r RepositoryParticipation) String() string {
- return Stringify(r)
-}
-
-// ListParticipation returns the total commit counts for the 'owner'
-// and total commit counts in 'all'. 'all' is everyone combined,
-// including the 'owner' in the last 52 weeks. If you’d like to get
-// the commit counts for non-owners, you can subtract 'all' from 'owner'.
-//
-// The array order is oldest week (index 0) to most recent week.
-//
-// If this is the first time these statistics are requested for the given
-// repository, this method will return a non-nil error and a status code
-// of 202. This is because this is the status that github returns to
-// signify that it is now computing the requested statistics. A follow
-// up request, after a delay of a second or so, should result in a
-// successful request.
-//
-// GitHub API Docs: https://developer.github.com/v3/repos/statistics/#participation
-func (s *RepositoriesService) ListParticipation(owner, repo string) (*RepositoryParticipation, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/stats/participation", owner, repo)
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- participation := new(RepositoryParticipation)
- resp, err := s.client.Do(req, participation)
- if err != nil {
- return nil, resp, err
- }
-
- return participation, resp, err
-}
-
-// PunchCard represents the number of commits made during a given hour of a
-// day of thew eek.
-type PunchCard struct {
- Day *int // Day of the week (0-6: =Sunday - Saturday).
- Hour *int // Hour of day (0-23).
- Commits *int // Number of commits.
-}
-
-// ListPunchCard returns the number of commits per hour in each day.
-//
-// GitHub API Docs: https://developer.github.com/v3/repos/statistics/#punch-card
-func (s *RepositoriesService) ListPunchCard(owner, repo string) ([]*PunchCard, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/stats/punch_card", owner, repo)
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- var results [][]int
- resp, err := s.client.Do(req, &results)
-
- // convert int slices into Punchcards
- var cards []*PunchCard
- for _, result := range results {
- if len(result) != 3 {
- continue
- }
- card := &PunchCard{
- Day: Int(result[0]),
- Hour: Int(result[1]),
- Commits: Int(result[2]),
- }
- cards = append(cards, card)
- }
-
- return cards, resp, err
-}
diff --git a/vendor/src/github.com/google/go-github/github/repos_stats_test.go b/vendor/src/github.com/google/go-github/github/repos_stats_test.go
deleted file mode 100644
index 56acc56..0000000
--- a/vendor/src/github.com/google/go-github/github/repos_stats_test.go
+++ /dev/null
@@ -1,210 +0,0 @@
-// Copyright 2014 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "fmt"
- "net/http"
- "reflect"
- "testing"
- "time"
-)
-
-func TestRepositoriesService_ListContributorsStats(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/stats/contributors", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
-
- fmt.Fprint(w, `
-[
- {
- "author": {
- "id": 1
- },
- "total": 135,
- "weeks": [
- {
- "w": 1367712000,
- "a": 6898,
- "d": 77,
- "c": 10
- }
- ]
- }
-]
-`)
- })
-
- stats, _, err := client.Repositories.ListContributorsStats("o", "r")
- if err != nil {
- t.Errorf("RepositoriesService.ListContributorsStats returned error: %v", err)
- }
-
- want := []*ContributorStats{
- {
- Author: &Contributor{
- ID: Int(1),
- },
- Total: Int(135),
- Weeks: []WeeklyStats{
- {
- Week: &Timestamp{time.Date(2013, 05, 05, 00, 00, 00, 0, time.UTC).Local()},
- Additions: Int(6898),
- Deletions: Int(77),
- Commits: Int(10),
- },
- },
- },
- }
-
- if !reflect.DeepEqual(stats, want) {
- t.Errorf("RepositoriesService.ListContributorsStats returned %+v, want %+v", stats, want)
- }
-}
-
-func TestRepositoriesService_ListCommitActivity(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/stats/commit_activity", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
-
- fmt.Fprint(w, `
-[
- {
- "days": [0, 3, 26, 20, 39, 1, 0],
- "total": 89,
- "week": 1336280400
- }
-]
-`)
- })
-
- activity, _, err := client.Repositories.ListCommitActivity("o", "r")
- if err != nil {
- t.Errorf("RepositoriesService.ListCommitActivity returned error: %v", err)
- }
-
- want := []*WeeklyCommitActivity{
- {
- Days: []int{0, 3, 26, 20, 39, 1, 0},
- Total: Int(89),
- Week: &Timestamp{time.Date(2012, 05, 06, 05, 00, 00, 0, time.UTC).Local()},
- },
- }
-
- if !reflect.DeepEqual(activity, want) {
- t.Errorf("RepositoriesService.ListCommitActivity returned %+v, want %+v", activity, want)
- }
-}
-
-func TestRepositoriesService_ListCodeFrequency(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/stats/code_frequency", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
-
- fmt.Fprint(w, `[[1302998400, 1124, -435]]`)
- })
-
- code, _, err := client.Repositories.ListCodeFrequency("o", "r")
- if err != nil {
- t.Errorf("RepositoriesService.ListCodeFrequency returned error: %v", err)
- }
-
- want := []*WeeklyStats{{
- Week: &Timestamp{time.Date(2011, 04, 17, 00, 00, 00, 0, time.UTC).Local()},
- Additions: Int(1124),
- Deletions: Int(-435),
- }}
-
- if !reflect.DeepEqual(code, want) {
- t.Errorf("RepositoriesService.ListCodeFrequency returned %+v, want %+v", code, want)
- }
-}
-
-func TestRepositoriesService_Participation(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/stats/participation", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
-
- fmt.Fprint(w, `
-{
- "all": [
- 11,21,15,2,8,1,8,23,17,21,11,10,33,
- 91,38,34,22,23,32,3,43,87,71,18,13,5,
- 13,16,66,27,12,45,110,117,13,8,18,9,19,
- 26,39,12,20,31,46,91,45,10,24,9,29,7
- ],
- "owner": [
- 3,2,3,0,2,0,5,14,7,9,1,5,0,
- 48,19,2,0,1,10,2,23,40,35,8,8,2,
- 10,6,30,0,2,9,53,104,3,3,10,4,7,
- 11,21,4,4,22,26,63,11,2,14,1,10,3
- ]
-}
-`)
- })
-
- participation, _, err := client.Repositories.ListParticipation("o", "r")
- if err != nil {
- t.Errorf("RepositoriesService.ListParticipation returned error: %v", err)
- }
-
- want := &RepositoryParticipation{
- All: []int{
- 11, 21, 15, 2, 8, 1, 8, 23, 17, 21, 11, 10, 33,
- 91, 38, 34, 22, 23, 32, 3, 43, 87, 71, 18, 13, 5,
- 13, 16, 66, 27, 12, 45, 110, 117, 13, 8, 18, 9, 19,
- 26, 39, 12, 20, 31, 46, 91, 45, 10, 24, 9, 29, 7,
- },
- Owner: []int{
- 3, 2, 3, 0, 2, 0, 5, 14, 7, 9, 1, 5, 0,
- 48, 19, 2, 0, 1, 10, 2, 23, 40, 35, 8, 8, 2,
- 10, 6, 30, 0, 2, 9, 53, 104, 3, 3, 10, 4, 7,
- 11, 21, 4, 4, 22, 26, 63, 11, 2, 14, 1, 10, 3,
- },
- }
-
- if !reflect.DeepEqual(participation, want) {
- t.Errorf("RepositoriesService.ListParticipation returned %+v, want %+v", participation, want)
- }
-}
-
-func TestRepositoriesService_ListPunchCard(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/stats/punch_card", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
-
- fmt.Fprint(w, `[
- [0, 0, 5],
- [0, 1, 43],
- [0, 2, 21]
- ]`)
- })
-
- card, _, err := client.Repositories.ListPunchCard("o", "r")
- if err != nil {
- t.Errorf("RepositoriesService.ListPunchCard returned error: %v", err)
- }
-
- want := []*PunchCard{
- {Day: Int(0), Hour: Int(0), Commits: Int(5)},
- {Day: Int(0), Hour: Int(1), Commits: Int(43)},
- {Day: Int(0), Hour: Int(2), Commits: Int(21)},
- }
-
- if !reflect.DeepEqual(card, want) {
- t.Errorf("RepositoriesService.ListPunchCard returned %+v, want %+v", card, want)
- }
-}
diff --git a/vendor/src/github.com/google/go-github/github/repos_statuses.go b/vendor/src/github.com/google/go-github/github/repos_statuses.go
deleted file mode 100644
index 6478ee2..0000000
--- a/vendor/src/github.com/google/go-github/github/repos_statuses.go
+++ /dev/null
@@ -1,128 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "fmt"
- "time"
-)
-
-// RepoStatus represents the status of a repository at a particular reference.
-type RepoStatus struct {
- ID *int `json:"id,omitempty"`
- URL *string `json:"url,omitempty"`
-
- // State is the current state of the repository. Possible values are:
- // pending, success, error, or failure.
- State *string `json:"state,omitempty"`
-
- // TargetURL is the URL of the page representing this status. It will be
- // linked from the GitHub UI to allow users to see the source of the status.
- TargetURL *string `json:"target_url,omitempty"`
-
- // Description is a short high level summary of the status.
- Description *string `json:"description,omitempty"`
-
- // A string label to differentiate this status from the statuses of other systems.
- Context *string `json:"context,omitempty"`
-
- Creator *User `json:"creator,omitempty"`
- CreatedAt *time.Time `json:"created_at,omitempty"`
- UpdatedAt *time.Time `json:"updated_at,omitempty"`
-}
-
-func (r RepoStatus) String() string {
- return Stringify(r)
-}
-
-// ListStatuses lists the statuses of a repository at the specified
-// reference. ref can be a SHA, a branch name, or a tag name.
-//
-// GitHub API docs: http://developer.github.com/v3/repos/statuses/#list-statuses-for-a-specific-ref
-func (s *RepositoriesService) ListStatuses(owner, repo, ref string, opt *ListOptions) ([]*RepoStatus, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/commits/%v/statuses", owner, repo, ref)
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- statuses := new([]*RepoStatus)
- resp, err := s.client.Do(req, statuses)
- if err != nil {
- return nil, resp, err
- }
-
- return *statuses, resp, err
-}
-
-// CreateStatus creates a new status for a repository at the specified
-// reference. Ref can be a SHA, a branch name, or a tag name.
-//
-// GitHub API docs: http://developer.github.com/v3/repos/statuses/#create-a-status
-func (s *RepositoriesService) CreateStatus(owner, repo, ref string, status *RepoStatus) (*RepoStatus, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/statuses/%v", owner, repo, ref)
- req, err := s.client.NewRequest("POST", u, status)
- if err != nil {
- return nil, nil, err
- }
-
- repoStatus := new(RepoStatus)
- resp, err := s.client.Do(req, repoStatus)
- if err != nil {
- return nil, resp, err
- }
-
- return repoStatus, resp, err
-}
-
-// CombinedStatus represents the combined status of a repository at a particular reference.
-type CombinedStatus struct {
- // State is the combined state of the repository. Possible values are:
- // failure, pending, or success.
- State *string `json:"state,omitempty"`
-
- Name *string `json:"name,omitempty"`
- SHA *string `json:"sha,omitempty"`
- TotalCount *int `json:"total_count,omitempty"`
- Statuses []RepoStatus `json:"statuses,omitempty"`
-
- CommitURL *string `json:"commit_url,omitempty"`
- RepositoryURL *string `json:"repository_url,omitempty"`
-}
-
-func (s CombinedStatus) String() string {
- return Stringify(s)
-}
-
-// GetCombinedStatus returns the combined status of a repository at the specified
-// reference. ref can be a SHA, a branch name, or a tag name.
-//
-// GitHub API docs: https://developer.github.com/v3/repos/statuses/#get-the-combined-status-for-a-specific-ref
-func (s *RepositoriesService) GetCombinedStatus(owner, repo, ref string, opt *ListOptions) (*CombinedStatus, *Response, error) {
- u := fmt.Sprintf("repos/%v/%v/commits/%v/status", owner, repo, ref)
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- status := new(CombinedStatus)
- resp, err := s.client.Do(req, status)
- if err != nil {
- return nil, resp, err
- }
-
- return status, resp, err
-}
diff --git a/vendor/src/github.com/google/go-github/github/repos_statuses_test.go b/vendor/src/github.com/google/go-github/github/repos_statuses_test.go
deleted file mode 100644
index c1cfc12..0000000
--- a/vendor/src/github.com/google/go-github/github/repos_statuses_test.go
+++ /dev/null
@@ -1,96 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "encoding/json"
- "fmt"
- "net/http"
- "reflect"
- "testing"
-)
-
-func TestRepositoriesService_ListStatuses(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/commits/r/statuses", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testFormValues(t, r, values{"page": "2"})
- fmt.Fprint(w, `[{"id":1}]`)
- })
-
- opt := &ListOptions{Page: 2}
- statuses, _, err := client.Repositories.ListStatuses("o", "r", "r", opt)
- if err != nil {
- t.Errorf("Repositories.ListStatuses returned error: %v", err)
- }
-
- want := []*RepoStatus{{ID: Int(1)}}
- if !reflect.DeepEqual(statuses, want) {
- t.Errorf("Repositories.ListStatuses returned %+v, want %+v", statuses, want)
- }
-}
-
-func TestRepositoriesService_ListStatuses_invalidOwner(t *testing.T) {
- _, _, err := client.Repositories.ListStatuses("%", "r", "r", nil)
- testURLParseError(t, err)
-}
-
-func TestRepositoriesService_CreateStatus(t *testing.T) {
- setup()
- defer teardown()
-
- input := &RepoStatus{State: String("s"), TargetURL: String("t"), Description: String("d")}
-
- mux.HandleFunc("/repos/o/r/statuses/r", func(w http.ResponseWriter, r *http.Request) {
- v := new(RepoStatus)
- json.NewDecoder(r.Body).Decode(v)
-
- testMethod(t, r, "POST")
- if !reflect.DeepEqual(v, input) {
- t.Errorf("Request body = %+v, want %+v", v, input)
- }
- fmt.Fprint(w, `{"id":1}`)
- })
-
- status, _, err := client.Repositories.CreateStatus("o", "r", "r", input)
- if err != nil {
- t.Errorf("Repositories.CreateStatus returned error: %v", err)
- }
-
- want := &RepoStatus{ID: Int(1)}
- if !reflect.DeepEqual(status, want) {
- t.Errorf("Repositories.CreateStatus returned %+v, want %+v", status, want)
- }
-}
-
-func TestRepositoriesService_CreateStatus_invalidOwner(t *testing.T) {
- _, _, err := client.Repositories.CreateStatus("%", "r", "r", nil)
- testURLParseError(t, err)
-}
-
-func TestRepositoriesService_GetCombinedStatus(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/commits/r/status", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testFormValues(t, r, values{"page": "2"})
- fmt.Fprint(w, `{"state":"success", "statuses":[{"id":1}]}`)
- })
-
- opt := &ListOptions{Page: 2}
- status, _, err := client.Repositories.GetCombinedStatus("o", "r", "r", opt)
- if err != nil {
- t.Errorf("Repositories.GetCombinedStatus returned error: %v", err)
- }
-
- want := &CombinedStatus{State: String("success"), Statuses: []RepoStatus{{ID: Int(1)}}}
- if !reflect.DeepEqual(status, want) {
- t.Errorf("Repositories.GetCombinedStatus returned %+v, want %+v", status, want)
- }
-}
diff --git a/vendor/src/github.com/google/go-github/github/repos_test.go b/vendor/src/github.com/google/go-github/github/repos_test.go
deleted file mode 100644
index 9f738f9..0000000
--- a/vendor/src/github.com/google/go-github/github/repos_test.go
+++ /dev/null
@@ -1,537 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "encoding/json"
- "fmt"
- "net/http"
- "reflect"
- "testing"
-)
-
-func TestRepositoriesService_List_authenticatedUser(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/user/repos", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testHeader(t, r, "Accept", mediaTypeLicensesPreview)
- fmt.Fprint(w, `[{"id":1},{"id":2}]`)
- })
-
- repos, _, err := client.Repositories.List("", nil)
- if err != nil {
- t.Errorf("Repositories.List returned error: %v", err)
- }
-
- want := []*Repository{{ID: Int(1)}, {ID: Int(2)}}
- if !reflect.DeepEqual(repos, want) {
- t.Errorf("Repositories.List returned %+v, want %+v", repos, want)
- }
-}
-
-func TestRepositoriesService_List_specifiedUser(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/users/u/repos", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testHeader(t, r, "Accept", mediaTypeLicensesPreview)
- testFormValues(t, r, values{
- "visibility": "public",
- "affiliation": "owner,collaborator",
- "sort": "created",
- "direction": "asc",
- "page": "2",
- })
- fmt.Fprint(w, `[{"id":1}]`)
- })
-
- opt := &RepositoryListOptions{
- Visibility: "public",
- Affiliation: "owner,collaborator",
- Sort: "created",
- Direction: "asc",
- ListOptions: ListOptions{Page: 2},
- }
- repos, _, err := client.Repositories.List("u", opt)
- if err != nil {
- t.Errorf("Repositories.List returned error: %v", err)
- }
-
- want := []*Repository{{ID: Int(1)}}
- if !reflect.DeepEqual(repos, want) {
- t.Errorf("Repositories.List returned %+v, want %+v", repos, want)
- }
-}
-
-func TestRepositoriesService_List_specifiedUser_type(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/users/u/repos", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testHeader(t, r, "Accept", mediaTypeLicensesPreview)
- testFormValues(t, r, values{
- "type": "owner",
- })
- fmt.Fprint(w, `[{"id":1}]`)
- })
-
- opt := &RepositoryListOptions{
- Type: "owner",
- }
- repos, _, err := client.Repositories.List("u", opt)
- if err != nil {
- t.Errorf("Repositories.List returned error: %v", err)
- }
-
- want := []*Repository{{ID: Int(1)}}
- if !reflect.DeepEqual(repos, want) {
- t.Errorf("Repositories.List returned %+v, want %+v", repos, want)
- }
-}
-
-func TestRepositoriesService_List_invalidUser(t *testing.T) {
- _, _, err := client.Repositories.List("%", nil)
- testURLParseError(t, err)
-}
-
-func TestRepositoriesService_ListByOrg(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/orgs/o/repos", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testHeader(t, r, "Accept", mediaTypeLicensesPreview)
- testFormValues(t, r, values{
- "type": "forks",
- "page": "2",
- })
- fmt.Fprint(w, `[{"id":1}]`)
- })
-
- opt := &RepositoryListByOrgOptions{"forks", ListOptions{Page: 2}}
- repos, _, err := client.Repositories.ListByOrg("o", opt)
- if err != nil {
- t.Errorf("Repositories.ListByOrg returned error: %v", err)
- }
-
- want := []*Repository{{ID: Int(1)}}
- if !reflect.DeepEqual(repos, want) {
- t.Errorf("Repositories.ListByOrg returned %+v, want %+v", repos, want)
- }
-}
-
-func TestRepositoriesService_ListByOrg_invalidOrg(t *testing.T) {
- _, _, err := client.Repositories.ListByOrg("%", nil)
- testURLParseError(t, err)
-}
-
-func TestRepositoriesService_ListAll(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repositories", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testFormValues(t, r, values{
- "since": "1",
- "page": "2",
- "per_page": "3",
- })
- fmt.Fprint(w, `[{"id":1}]`)
- })
-
- opt := &RepositoryListAllOptions{1, ListOptions{2, 3}}
- repos, _, err := client.Repositories.ListAll(opt)
- if err != nil {
- t.Errorf("Repositories.ListAll returned error: %v", err)
- }
-
- want := []*Repository{{ID: Int(1)}}
- if !reflect.DeepEqual(repos, want) {
- t.Errorf("Repositories.ListAll returned %+v, want %+v", repos, want)
- }
-}
-
-func TestRepositoriesService_Create_user(t *testing.T) {
- setup()
- defer teardown()
-
- input := &Repository{Name: String("n")}
-
- mux.HandleFunc("/user/repos", func(w http.ResponseWriter, r *http.Request) {
- v := new(Repository)
- json.NewDecoder(r.Body).Decode(v)
-
- testMethod(t, r, "POST")
- if !reflect.DeepEqual(v, input) {
- t.Errorf("Request body = %+v, want %+v", v, input)
- }
-
- fmt.Fprint(w, `{"id":1}`)
- })
-
- repo, _, err := client.Repositories.Create("", input)
- if err != nil {
- t.Errorf("Repositories.Create returned error: %v", err)
- }
-
- want := &Repository{ID: Int(1)}
- if !reflect.DeepEqual(repo, want) {
- t.Errorf("Repositories.Create returned %+v, want %+v", repo, want)
- }
-}
-
-func TestRepositoriesService_Create_org(t *testing.T) {
- setup()
- defer teardown()
-
- input := &Repository{Name: String("n")}
-
- mux.HandleFunc("/orgs/o/repos", func(w http.ResponseWriter, r *http.Request) {
- v := new(Repository)
- json.NewDecoder(r.Body).Decode(v)
-
- testMethod(t, r, "POST")
- if !reflect.DeepEqual(v, input) {
- t.Errorf("Request body = %+v, want %+v", v, input)
- }
-
- fmt.Fprint(w, `{"id":1}`)
- })
-
- repo, _, err := client.Repositories.Create("o", input)
- if err != nil {
- t.Errorf("Repositories.Create returned error: %v", err)
- }
-
- want := &Repository{ID: Int(1)}
- if !reflect.DeepEqual(repo, want) {
- t.Errorf("Repositories.Create returned %+v, want %+v", repo, want)
- }
-}
-
-func TestRepositoriesService_Create_invalidOrg(t *testing.T) {
- _, _, err := client.Repositories.Create("%", nil)
- testURLParseError(t, err)
-}
-
-func TestRepositoriesService_Get(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testHeader(t, r, "Accept", mediaTypeLicensesPreview)
- fmt.Fprint(w, `{"id":1,"name":"n","description":"d","owner":{"login":"l"},"license":{"key":"mit"}}`)
- })
-
- repo, _, err := client.Repositories.Get("o", "r")
- if err != nil {
- t.Errorf("Repositories.Get returned error: %v", err)
- }
-
- want := &Repository{ID: Int(1), Name: String("n"), Description: String("d"), Owner: &User{Login: String("l")}, License: &License{Key: String("mit")}}
- if !reflect.DeepEqual(repo, want) {
- t.Errorf("Repositories.Get returned %+v, want %+v", repo, want)
- }
-}
-
-func TestRepositoriesService_GetByID(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repositories/1", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testHeader(t, r, "Accept", mediaTypeLicensesPreview)
- fmt.Fprint(w, `{"id":1,"name":"n","description":"d","owner":{"login":"l"},"license":{"key":"mit"}}`)
- })
-
- repo, _, err := client.Repositories.GetByID(1)
- if err != nil {
- t.Errorf("Repositories.GetByID returned error: %v", err)
- }
-
- want := &Repository{ID: Int(1), Name: String("n"), Description: String("d"), Owner: &User{Login: String("l")}, License: &License{Key: String("mit")}}
- if !reflect.DeepEqual(repo, want) {
- t.Errorf("Repositories.GetByID returned %+v, want %+v", repo, want)
- }
-}
-
-func TestRepositoriesService_Edit(t *testing.T) {
- setup()
- defer teardown()
-
- i := true
- input := &Repository{HasIssues: &i}
-
- mux.HandleFunc("/repos/o/r", func(w http.ResponseWriter, r *http.Request) {
- v := new(Repository)
- json.NewDecoder(r.Body).Decode(v)
-
- testMethod(t, r, "PATCH")
- if !reflect.DeepEqual(v, input) {
- t.Errorf("Request body = %+v, want %+v", v, input)
- }
- fmt.Fprint(w, `{"id":1}`)
- })
-
- repo, _, err := client.Repositories.Edit("o", "r", input)
- if err != nil {
- t.Errorf("Repositories.Edit returned error: %v", err)
- }
-
- want := &Repository{ID: Int(1)}
- if !reflect.DeepEqual(repo, want) {
- t.Errorf("Repositories.Edit returned %+v, want %+v", repo, want)
- }
-}
-
-func TestRepositoriesService_Delete(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "DELETE")
- })
-
- _, err := client.Repositories.Delete("o", "r")
- if err != nil {
- t.Errorf("Repositories.Delete returned error: %v", err)
- }
-}
-
-func TestRepositoriesService_Get_invalidOwner(t *testing.T) {
- _, _, err := client.Repositories.Get("%", "r")
- testURLParseError(t, err)
-}
-
-func TestRepositoriesService_Edit_invalidOwner(t *testing.T) {
- _, _, err := client.Repositories.Edit("%", "r", nil)
- testURLParseError(t, err)
-}
-
-func TestRepositoriesService_ListContributors(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/contributors", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testFormValues(t, r, values{
- "anon": "true",
- "page": "2",
- })
- fmt.Fprint(w, `[{"contributions":42}]`)
- })
-
- opts := &ListContributorsOptions{Anon: "true", ListOptions: ListOptions{Page: 2}}
- contributors, _, err := client.Repositories.ListContributors("o", "r", opts)
- if err != nil {
- t.Errorf("Repositories.ListContributors returned error: %v", err)
- }
-
- want := []*Contributor{{Contributions: Int(42)}}
- if !reflect.DeepEqual(contributors, want) {
- t.Errorf("Repositories.ListContributors returned %+v, want %+v", contributors, want)
- }
-}
-
-func TestRepositoriesService_ListLanguages(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/languages", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- fmt.Fprint(w, `{"go":1}`)
- })
-
- languages, _, err := client.Repositories.ListLanguages("o", "r")
- if err != nil {
- t.Errorf("Repositories.ListLanguages returned error: %v", err)
- }
-
- want := map[string]int{"go": 1}
- if !reflect.DeepEqual(languages, want) {
- t.Errorf("Repositories.ListLanguages returned %+v, want %+v", languages, want)
- }
-}
-
-func TestRepositoriesService_ListTeams(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/teams", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testFormValues(t, r, values{"page": "2"})
- fmt.Fprint(w, `[{"id":1}]`)
- })
-
- opt := &ListOptions{Page: 2}
- teams, _, err := client.Repositories.ListTeams("o", "r", opt)
- if err != nil {
- t.Errorf("Repositories.ListTeams returned error: %v", err)
- }
-
- want := []*Team{{ID: Int(1)}}
- if !reflect.DeepEqual(teams, want) {
- t.Errorf("Repositories.ListTeams returned %+v, want %+v", teams, want)
- }
-}
-
-func TestRepositoriesService_ListTags(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/tags", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testFormValues(t, r, values{"page": "2"})
- fmt.Fprint(w, `[{"name":"n", "commit" : {"sha" : "s", "url" : "u"}, "zipball_url": "z", "tarball_url": "t"}]`)
- })
-
- opt := &ListOptions{Page: 2}
- tags, _, err := client.Repositories.ListTags("o", "r", opt)
- if err != nil {
- t.Errorf("Repositories.ListTags returned error: %v", err)
- }
-
- want := []*RepositoryTag{
- {
- Name: String("n"),
- Commit: &Commit{
- SHA: String("s"),
- URL: String("u"),
- },
- ZipballURL: String("z"),
- TarballURL: String("t"),
- },
- }
- if !reflect.DeepEqual(tags, want) {
- t.Errorf("Repositories.ListTags returned %+v, want %+v", tags, want)
- }
-}
-
-func TestRepositoriesService_ListBranches(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/branches", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testHeader(t, r, "Accept", mediaTypeProtectedBranchesPreview)
- testFormValues(t, r, values{"page": "2"})
- fmt.Fprint(w, `[{"name":"master", "commit" : {"sha" : "a57781", "url" : "https://api.github.com/repos/o/r/commits/a57781"}}]`)
- })
-
- opt := &ListOptions{Page: 2}
- branches, _, err := client.Repositories.ListBranches("o", "r", opt)
- if err != nil {
- t.Errorf("Repositories.ListBranches returned error: %v", err)
- }
-
- want := []*Branch{{Name: String("master"), Commit: &Commit{SHA: String("a57781"), URL: String("https://api.github.com/repos/o/r/commits/a57781")}}}
- if !reflect.DeepEqual(branches, want) {
- t.Errorf("Repositories.ListBranches returned %+v, want %+v", branches, want)
- }
-}
-
-func TestRepositoriesService_GetBranch(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/branches/b", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testHeader(t, r, "Accept", mediaTypeProtectedBranchesPreview)
- fmt.Fprint(w, `{"name":"n", "commit":{"sha":"s"}, "protection": {"enabled": true, "required_status_checks": {"enforcement_level": "everyone","contexts": []}}}`)
- })
-
- branch, _, err := client.Repositories.GetBranch("o", "r", "b")
- if err != nil {
- t.Errorf("Repositories.GetBranch returned error: %v", err)
- }
-
- want := &Branch{
- Name: String("n"),
- Commit: &Commit{SHA: String("s")},
- Protection: &Protection{
- Enabled: Bool(true),
- RequiredStatusChecks: &RequiredStatusChecks{
- EnforcementLevel: String("everyone"),
- Contexts: &[]string{},
- },
- },
- }
-
- if !reflect.DeepEqual(branch, want) {
- t.Errorf("Repositories.GetBranch returned %+v, want %+v", branch, want)
- }
-}
-
-func TestRepositoriesService_EditBranch(t *testing.T) {
- setup()
- defer teardown()
-
- input := &Branch{
- Protection: &Protection{
- Enabled: Bool(true),
- RequiredStatusChecks: &RequiredStatusChecks{
- EnforcementLevel: String("everyone"),
- Contexts: &[]string{"continous-integration"},
- },
- },
- }
-
- mux.HandleFunc("/repos/o/r/branches/b", func(w http.ResponseWriter, r *http.Request) {
- v := new(Branch)
- json.NewDecoder(r.Body).Decode(v)
-
- testMethod(t, r, "PATCH")
- if !reflect.DeepEqual(v, input) {
- t.Errorf("Request body = %+v, want %+v", v, input)
- }
- testHeader(t, r, "Accept", mediaTypeProtectedBranchesPreview)
- fmt.Fprint(w, `{"protection": {"enabled": true, "required_status_checks": {"enforcement_level": "everyone", "contexts": ["continous-integration"]}}}`)
- })
-
- branch, _, err := client.Repositories.EditBranch("o", "r", "b", input)
- if err != nil {
- t.Errorf("Repositories.EditBranch returned error: %v", err)
- }
-
- if !reflect.DeepEqual(branch, input) {
- t.Errorf("Repositories.EditBranch returned %+v, want %+v", branch, input)
- }
-}
-
-func TestRepositoriesService_ListLanguages_invalidOwner(t *testing.T) {
- _, _, err := client.Repositories.ListLanguages("%", "%")
- testURLParseError(t, err)
-}
-
-func TestRepositoriesService_License(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/repos/o/r/license", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- fmt.Fprint(w, `{"license":{"key":"mit","name":"MIT License","url":"https://api.github.com/licenses/mit","featured":true}}`)
- })
-
- got, _, err := client.Repositories.License("o", "r")
- if err != nil {
- t.Errorf("Repositories.License returned error: %v", err)
- }
-
- want := &License{
- Name: String("MIT License"),
- Key: String("mit"),
- URL: String("https://api.github.com/licenses/mit"),
- Featured: Bool(true),
- }
- if !reflect.DeepEqual(got, want) {
- t.Errorf("Repositories.License returned %+v, want %+v", got, want)
- }
-}
diff --git a/vendor/src/github.com/google/go-github/github/search.go b/vendor/src/github.com/google/go-github/github/search.go
deleted file mode 100644
index 0c7ffcb..0000000
--- a/vendor/src/github.com/google/go-github/github/search.go
+++ /dev/null
@@ -1,156 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "fmt"
-
- qs "github.com/google/go-querystring/query"
-)
-
-// SearchService provides access to the search related functions
-// in the GitHub API.
-//
-// GitHub API docs: http://developer.github.com/v3/search/
-type SearchService service
-
-// SearchOptions specifies optional parameters to the SearchService methods.
-type SearchOptions struct {
- // How to sort the search results. Possible values are:
- // - for repositories: stars, fork, updated
- // - for code: indexed
- // - for issues: comments, created, updated
- // - for users: followers, repositories, joined
- //
- // Default is to sort by best match.
- Sort string `url:"sort,omitempty"`
-
- // Sort order if sort parameter is provided. Possible values are: asc,
- // desc. Default is desc.
- Order string `url:"order,omitempty"`
-
- // Whether to retrieve text match metadata with a query
- TextMatch bool `url:"-"`
-
- ListOptions
-}
-
-// RepositoriesSearchResult represents the result of a repositories search.
-type RepositoriesSearchResult struct {
- Total *int `json:"total_count,omitempty"`
- Repositories []Repository `json:"items,omitempty"`
-}
-
-// Repositories searches repositories via various criteria.
-//
-// GitHub API docs: http://developer.github.com/v3/search/#search-repositories
-func (s *SearchService) Repositories(query string, opt *SearchOptions) (*RepositoriesSearchResult, *Response, error) {
- result := new(RepositoriesSearchResult)
- resp, err := s.search("repositories", query, opt, result)
- return result, resp, err
-}
-
-// IssuesSearchResult represents the result of an issues search.
-type IssuesSearchResult struct {
- Total *int `json:"total_count,omitempty"`
- Issues []Issue `json:"items,omitempty"`
-}
-
-// Issues searches issues via various criteria.
-//
-// GitHub API docs: http://developer.github.com/v3/search/#search-issues
-func (s *SearchService) Issues(query string, opt *SearchOptions) (*IssuesSearchResult, *Response, error) {
- result := new(IssuesSearchResult)
- resp, err := s.search("issues", query, opt, result)
- return result, resp, err
-}
-
-// UsersSearchResult represents the result of an issues search.
-type UsersSearchResult struct {
- Total *int `json:"total_count,omitempty"`
- Users []User `json:"items,omitempty"`
-}
-
-// Users searches users via various criteria.
-//
-// GitHub API docs: http://developer.github.com/v3/search/#search-users
-func (s *SearchService) Users(query string, opt *SearchOptions) (*UsersSearchResult, *Response, error) {
- result := new(UsersSearchResult)
- resp, err := s.search("users", query, opt, result)
- return result, resp, err
-}
-
-// Match represents a single text match.
-type Match struct {
- Text *string `json:"text,omitempty"`
- Indices []int `json:"indices,omitempty"`
-}
-
-// TextMatch represents a text match for a SearchResult
-type TextMatch struct {
- ObjectURL *string `json:"object_url,omitempty"`
- ObjectType *string `json:"object_type,omitempty"`
- Property *string `json:"property,omitempty"`
- Fragment *string `json:"fragment,omitempty"`
- Matches []Match `json:"matches,omitempty"`
-}
-
-func (tm TextMatch) String() string {
- return Stringify(tm)
-}
-
-// CodeSearchResult represents the result of an code search.
-type CodeSearchResult struct {
- Total *int `json:"total_count,omitempty"`
- CodeResults []CodeResult `json:"items,omitempty"`
-}
-
-// CodeResult represents a single search result.
-type CodeResult struct {
- Name *string `json:"name,omitempty"`
- Path *string `json:"path,omitempty"`
- SHA *string `json:"sha,omitempty"`
- HTMLURL *string `json:"html_url,omitempty"`
- Repository *Repository `json:"repository,omitempty"`
- TextMatches []TextMatch `json:"text_matches,omitempty"`
-}
-
-func (c CodeResult) String() string {
- return Stringify(c)
-}
-
-// Code searches code via various criteria.
-//
-// GitHub API docs: http://developer.github.com/v3/search/#search-code
-func (s *SearchService) Code(query string, opt *SearchOptions) (*CodeSearchResult, *Response, error) {
- result := new(CodeSearchResult)
- resp, err := s.search("code", query, opt, result)
- return result, resp, err
-}
-
-// Helper function that executes search queries against different
-// GitHub search types (repositories, code, issues, users)
-func (s *SearchService) search(searchType string, query string, opt *SearchOptions, result interface{}) (*Response, error) {
- params, err := qs.Values(opt)
- if err != nil {
- return nil, err
- }
- params.Add("q", query)
- u := fmt.Sprintf("search/%s?%s", searchType, params.Encode())
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, err
- }
-
- if opt != nil && opt.TextMatch {
- // Accept header defaults to "application/vnd.github.v3+json"
- // We change it here to fetch back text-match metadata
- req.Header.Set("Accept", "application/vnd.github.v3.text-match+json")
- }
-
- return s.client.Do(req, result)
-}
diff --git a/vendor/src/github.com/google/go-github/github/search_test.go b/vendor/src/github.com/google/go-github/github/search_test.go
deleted file mode 100644
index b36043b..0000000
--- a/vendor/src/github.com/google/go-github/github/search_test.go
+++ /dev/null
@@ -1,201 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "fmt"
- "net/http"
- "reflect"
-
- "testing"
-)
-
-func TestSearchService_Repositories(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/search/repositories", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testFormValues(t, r, values{
- "q": "blah",
- "sort": "forks",
- "order": "desc",
- "page": "2",
- "per_page": "2",
- })
-
- fmt.Fprint(w, `{"total_count": 4, "items": [{"id":1},{"id":2}]}`)
- })
-
- opts := &SearchOptions{Sort: "forks", Order: "desc", ListOptions: ListOptions{Page: 2, PerPage: 2}}
- result, _, err := client.Search.Repositories("blah", opts)
- if err != nil {
- t.Errorf("Search.Repositories returned error: %v", err)
- }
-
- want := &RepositoriesSearchResult{
- Total: Int(4),
- Repositories: []Repository{{ID: Int(1)}, {ID: Int(2)}},
- }
- if !reflect.DeepEqual(result, want) {
- t.Errorf("Search.Repositories returned %+v, want %+v", result, want)
- }
-}
-
-func TestSearchService_Issues(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/search/issues", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testFormValues(t, r, values{
- "q": "blah",
- "sort": "forks",
- "order": "desc",
- "page": "2",
- "per_page": "2",
- })
-
- fmt.Fprint(w, `{"total_count": 4, "items": [{"number":1},{"number":2}]}`)
- })
-
- opts := &SearchOptions{Sort: "forks", Order: "desc", ListOptions: ListOptions{Page: 2, PerPage: 2}}
- result, _, err := client.Search.Issues("blah", opts)
- if err != nil {
- t.Errorf("Search.Issues returned error: %v", err)
- }
-
- want := &IssuesSearchResult{
- Total: Int(4),
- Issues: []Issue{{Number: Int(1)}, {Number: Int(2)}},
- }
- if !reflect.DeepEqual(result, want) {
- t.Errorf("Search.Issues returned %+v, want %+v", result, want)
- }
-}
-
-func TestSearchService_Users(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/search/users", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testFormValues(t, r, values{
- "q": "blah",
- "sort": "forks",
- "order": "desc",
- "page": "2",
- "per_page": "2",
- })
-
- fmt.Fprint(w, `{"total_count": 4, "items": [{"id":1},{"id":2}]}`)
- })
-
- opts := &SearchOptions{Sort: "forks", Order: "desc", ListOptions: ListOptions{Page: 2, PerPage: 2}}
- result, _, err := client.Search.Users("blah", opts)
- if err != nil {
- t.Errorf("Search.Issues returned error: %v", err)
- }
-
- want := &UsersSearchResult{
- Total: Int(4),
- Users: []User{{ID: Int(1)}, {ID: Int(2)}},
- }
- if !reflect.DeepEqual(result, want) {
- t.Errorf("Search.Users returned %+v, want %+v", result, want)
- }
-}
-
-func TestSearchService_Code(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/search/code", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testFormValues(t, r, values{
- "q": "blah",
- "sort": "forks",
- "order": "desc",
- "page": "2",
- "per_page": "2",
- })
-
- fmt.Fprint(w, `{"total_count": 4, "items": [{"name":"1"},{"name":"2"}]}`)
- })
-
- opts := &SearchOptions{Sort: "forks", Order: "desc", ListOptions: ListOptions{Page: 2, PerPage: 2}}
- result, _, err := client.Search.Code("blah", opts)
- if err != nil {
- t.Errorf("Search.Code returned error: %v", err)
- }
-
- want := &CodeSearchResult{
- Total: Int(4),
- CodeResults: []CodeResult{{Name: String("1")}, {Name: String("2")}},
- }
- if !reflect.DeepEqual(result, want) {
- t.Errorf("Search.Code returned %+v, want %+v", result, want)
- }
-}
-
-func TestSearchService_CodeTextMatch(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/search/code", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
-
- textMatchResponse := `
- {
- "total_count": 1,
- "items": [
- {
- "name":"gopher1",
- "text_matches": [
- {
- "fragment": "I'm afraid my friend what you have found\nIs a gopher who lives to feed",
- "matches": [
- {
- "text": "gopher",
- "indices": [
- 14,
- 21
- ]
- }
- ]
- }
- ]
- }
- ]
- }
- `
-
- fmt.Fprint(w, textMatchResponse)
- })
-
- opts := &SearchOptions{Sort: "forks", Order: "desc", ListOptions: ListOptions{Page: 2, PerPage: 2}, TextMatch: true}
- result, _, err := client.Search.Code("blah", opts)
- if err != nil {
- t.Errorf("Search.Code returned error: %v", err)
- }
-
- wantedCodeResult := CodeResult{
- Name: String("gopher1"),
- TextMatches: []TextMatch{{
- Fragment: String("I'm afraid my friend what you have found\nIs a gopher who lives to feed"),
- Matches: []Match{{Text: String("gopher"), Indices: []int{14, 21}}},
- },
- },
- }
-
- want := &CodeSearchResult{
- Total: Int(1),
- CodeResults: []CodeResult{wantedCodeResult},
- }
- if !reflect.DeepEqual(result, want) {
- t.Errorf("Search.Code returned %+v, want %+v", result, want)
- }
-}
diff --git a/vendor/src/github.com/google/go-github/github/strings.go b/vendor/src/github.com/google/go-github/github/strings.go
deleted file mode 100644
index 3857723..0000000
--- a/vendor/src/github.com/google/go-github/github/strings.go
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "bytes"
- "fmt"
- "io"
-
- "reflect"
-)
-
-var timestampType = reflect.TypeOf(Timestamp{})
-
-// Stringify attempts to create a reasonable string representation of types in
-// the GitHub library. It does things like resolve pointers to their values
-// and omits struct fields with nil values.
-func Stringify(message interface{}) string {
- var buf bytes.Buffer
- v := reflect.ValueOf(message)
- stringifyValue(&buf, v)
- return buf.String()
-}
-
-// stringifyValue was heavily inspired by the goprotobuf library.
-
-func stringifyValue(w io.Writer, val reflect.Value) {
- if val.Kind() == reflect.Ptr && val.IsNil() {
- w.Write([]byte(""))
- return
- }
-
- v := reflect.Indirect(val)
-
- switch v.Kind() {
- case reflect.String:
- fmt.Fprintf(w, `"%s"`, v)
- case reflect.Slice:
- w.Write([]byte{'['})
- for i := 0; i < v.Len(); i++ {
- if i > 0 {
- w.Write([]byte{' '})
- }
-
- stringifyValue(w, v.Index(i))
- }
-
- w.Write([]byte{']'})
- return
- case reflect.Struct:
- if v.Type().Name() != "" {
- w.Write([]byte(v.Type().String()))
- }
-
- // special handling of Timestamp values
- if v.Type() == timestampType {
- fmt.Fprintf(w, "{%s}", v.Interface())
- return
- }
-
- w.Write([]byte{'{'})
-
- var sep bool
- for i := 0; i < v.NumField(); i++ {
- fv := v.Field(i)
- if fv.Kind() == reflect.Ptr && fv.IsNil() {
- continue
- }
- if fv.Kind() == reflect.Slice && fv.IsNil() {
- continue
- }
-
- if sep {
- w.Write([]byte(", "))
- } else {
- sep = true
- }
-
- w.Write([]byte(v.Type().Field(i).Name))
- w.Write([]byte{':'})
- stringifyValue(w, fv)
- }
-
- w.Write([]byte{'}'})
- default:
- if v.CanInterface() {
- fmt.Fprint(w, v.Interface())
- }
- }
-}
diff --git a/vendor/src/github.com/google/go-github/github/strings_test.go b/vendor/src/github.com/google/go-github/github/strings_test.go
deleted file mode 100644
index a393eb6..0000000
--- a/vendor/src/github.com/google/go-github/github/strings_test.go
+++ /dev/null
@@ -1,137 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "fmt"
- "testing"
- "time"
-)
-
-func TestStringify(t *testing.T) {
- var nilPointer *string
-
- var tests = []struct {
- in interface{}
- out string
- }{
- // basic types
- {"foo", `"foo"`},
- {123, `123`},
- {1.5, `1.5`},
- {false, `false`},
- {
- []string{"a", "b"},
- `["a" "b"]`,
- },
- {
- struct {
- A []string
- }{nil},
- // nil slice is skipped
- `{}`,
- },
- {
- struct {
- A string
- }{"foo"},
- // structs not of a named type get no prefix
- `{A:"foo"}`,
- },
-
- // pointers
- {nilPointer, ``},
- {String("foo"), `"foo"`},
- {Int(123), `123`},
- {Bool(false), `false`},
- {
- []*string{String("a"), String("b")},
- `["a" "b"]`,
- },
-
- // actual GitHub structs
- {
- Timestamp{time.Date(2006, 01, 02, 15, 04, 05, 0, time.UTC)},
- `github.Timestamp{2006-01-02 15:04:05 +0000 UTC}`,
- },
- {
- &Timestamp{time.Date(2006, 01, 02, 15, 04, 05, 0, time.UTC)},
- `github.Timestamp{2006-01-02 15:04:05 +0000 UTC}`,
- },
- {
- User{ID: Int(123), Name: String("n")},
- `github.User{ID:123, Name:"n"}`,
- },
- {
- Repository{Owner: &User{ID: Int(123)}},
- `github.Repository{Owner:github.User{ID:123}}`,
- },
- }
-
- for i, tt := range tests {
- s := Stringify(tt.in)
- if s != tt.out {
- t.Errorf("%d. Stringify(%q) => %q, want %q", i, tt.in, s, tt.out)
- }
- }
-}
-
-// Directly test the String() methods on various GitHub types. We don't do an
-// exaustive test of all the various field types, since TestStringify() above
-// takes care of that. Rather, we just make sure that Stringify() is being
-// used to build the strings, which we do by verifying that pointers are
-// stringified as their underlying value.
-func TestString(t *testing.T) {
- var tests = []struct {
- in interface{}
- out string
- }{
- {CodeResult{Name: String("n")}, `github.CodeResult{Name:"n"}`},
- {CommitAuthor{Name: String("n")}, `github.CommitAuthor{Name:"n"}`},
- {CommitFile{SHA: String("s")}, `github.CommitFile{SHA:"s"}`},
- {CommitStats{Total: Int(1)}, `github.CommitStats{Total:1}`},
- {CommitsComparison{TotalCommits: Int(1)}, `github.CommitsComparison{TotalCommits:1}`},
- {Commit{SHA: String("s")}, `github.Commit{SHA:"s"}`},
- {Event{ID: String("1")}, `github.Event{ID:"1"}`},
- {GistComment{ID: Int(1)}, `github.GistComment{ID:1}`},
- {GistFile{Size: Int(1)}, `github.GistFile{Size:1}`},
- {Gist{ID: String("1")}, `github.Gist{ID:"1", Files:map[]}`},
- {GitObject{SHA: String("s")}, `github.GitObject{SHA:"s"}`},
- {Gitignore{Name: String("n")}, `github.Gitignore{Name:"n"}`},
- {Hook{ID: Int(1)}, `github.Hook{Config:map[], ID:1}`},
- {IssueComment{ID: Int(1)}, `github.IssueComment{ID:1}`},
- {Issue{Number: Int(1)}, `github.Issue{Number:1}`},
- {Key{ID: Int(1)}, `github.Key{ID:1}`},
- {Label{Name: String("l")}, "l"},
- {Organization{ID: Int(1)}, `github.Organization{ID:1}`},
- {PullRequestComment{ID: Int(1)}, `github.PullRequestComment{ID:1}`},
- {PullRequest{Number: Int(1)}, `github.PullRequest{Number:1}`},
- {PushEventCommit{SHA: String("s")}, `github.PushEventCommit{SHA:"s"}`},
- {PushEvent{PushID: Int(1)}, `github.PushEvent{PushID:1}`},
- {Reference{Ref: String("r")}, `github.Reference{Ref:"r"}`},
- {ReleaseAsset{ID: Int(1)}, `github.ReleaseAsset{ID:1}`},
- {RepoStatus{ID: Int(1)}, `github.RepoStatus{ID:1}`},
- {RepositoryComment{ID: Int(1)}, `github.RepositoryComment{ID:1}`},
- {RepositoryCommit{SHA: String("s")}, `github.RepositoryCommit{SHA:"s"}`},
- {RepositoryContent{Name: String("n")}, `github.RepositoryContent{Name:"n"}`},
- {RepositoryRelease{ID: Int(1)}, `github.RepositoryRelease{ID:1}`},
- {Repository{ID: Int(1)}, `github.Repository{ID:1}`},
- {Team{ID: Int(1)}, `github.Team{ID:1}`},
- {TreeEntry{SHA: String("s")}, `github.TreeEntry{SHA:"s"}`},
- {Tree{SHA: String("s")}, `github.Tree{SHA:"s"}`},
- {User{ID: Int(1)}, `github.User{ID:1}`},
- {WebHookAuthor{Name: String("n")}, `github.WebHookAuthor{Name:"n"}`},
- {WebHookCommit{ID: String("1")}, `github.WebHookCommit{ID:"1"}`},
- {WebHookPayload{Ref: String("r")}, `github.WebHookPayload{Ref:"r"}`},
- }
-
- for i, tt := range tests {
- s := tt.in.(fmt.Stringer).String()
- if s != tt.out {
- t.Errorf("%d. String() => %q, want %q", i, tt.in, tt.out)
- }
- }
-}
diff --git a/vendor/src/github.com/google/go-github/github/timestamp.go b/vendor/src/github.com/google/go-github/github/timestamp.go
deleted file mode 100644
index a1c1554..0000000
--- a/vendor/src/github.com/google/go-github/github/timestamp.go
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "strconv"
- "time"
-)
-
-// Timestamp represents a time that can be unmarshalled from a JSON string
-// formatted as either an RFC3339 or Unix timestamp. This is necessary for some
-// fields since the GitHub API is inconsistent in how it represents times. All
-// exported methods of time.Time can be called on Timestamp.
-type Timestamp struct {
- time.Time
-}
-
-func (t Timestamp) String() string {
- return t.Time.String()
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-// Time is expected in RFC3339 or Unix format.
-func (t *Timestamp) UnmarshalJSON(data []byte) (err error) {
- str := string(data)
- i, err := strconv.ParseInt(str, 10, 64)
- if err == nil {
- (*t).Time = time.Unix(i, 0)
- } else {
- (*t).Time, err = time.Parse(`"`+time.RFC3339+`"`, str)
- }
- return
-}
-
-// Equal reports whether t and u are equal based on time.Equal
-func (t Timestamp) Equal(u Timestamp) bool {
- return t.Time.Equal(u.Time)
-}
diff --git a/vendor/src/github.com/google/go-github/github/timestamp_test.go b/vendor/src/github.com/google/go-github/github/timestamp_test.go
deleted file mode 100644
index 12376c5..0000000
--- a/vendor/src/github.com/google/go-github/github/timestamp_test.go
+++ /dev/null
@@ -1,181 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "encoding/json"
- "fmt"
- "testing"
- "time"
-)
-
-const (
- emptyTimeStr = `"0001-01-01T00:00:00Z"`
- referenceTimeStr = `"2006-01-02T15:04:05Z"`
- referenceUnixTimeStr = `1136214245`
-)
-
-var (
- referenceTime = time.Date(2006, 01, 02, 15, 04, 05, 0, time.UTC)
- unixOrigin = time.Unix(0, 0).In(time.UTC)
-)
-
-func TestTimestamp_Marshal(t *testing.T) {
- testCases := []struct {
- desc string
- data Timestamp
- want string
- wantErr bool
- equal bool
- }{
- {"Reference", Timestamp{referenceTime}, referenceTimeStr, false, true},
- {"Empty", Timestamp{}, emptyTimeStr, false, true},
- {"Mismatch", Timestamp{}, referenceTimeStr, false, false},
- }
- for _, tc := range testCases {
- out, err := json.Marshal(tc.data)
- if gotErr := err != nil; gotErr != tc.wantErr {
- t.Errorf("%s: gotErr=%v, wantErr=%v, err=%v", tc.desc, gotErr, tc.wantErr, err)
- }
- got := string(out)
- equal := got == tc.want
- if (got == tc.want) != tc.equal {
- t.Errorf("%s: got=%s, want=%s, equal=%v, want=%v", tc.desc, got, tc.want, equal, tc.equal)
- }
- }
-}
-
-func TestTimestamp_Unmarshal(t *testing.T) {
- testCases := []struct {
- desc string
- data string
- want Timestamp
- wantErr bool
- equal bool
- }{
- {"Reference", referenceTimeStr, Timestamp{referenceTime}, false, true},
- {"ReferenceUnix", `1136214245`, Timestamp{referenceTime}, false, true},
- {"Empty", emptyTimeStr, Timestamp{}, false, true},
- {"UnixStart", `0`, Timestamp{unixOrigin}, false, true},
- {"Mismatch", referenceTimeStr, Timestamp{}, false, false},
- {"MismatchUnix", `0`, Timestamp{}, false, false},
- {"Invalid", `"asdf"`, Timestamp{referenceTime}, true, false},
- }
- for _, tc := range testCases {
- var got Timestamp
- err := json.Unmarshal([]byte(tc.data), &got)
- if gotErr := err != nil; gotErr != tc.wantErr {
- t.Errorf("%s: gotErr=%v, wantErr=%v, err=%v", tc.desc, gotErr, tc.wantErr, err)
- continue
- }
- equal := got.Equal(tc.want)
- if equal != tc.equal {
- t.Errorf("%s: got=%#v, want=%#v, equal=%v, want=%v", tc.desc, got, tc.want, equal, tc.equal)
- }
- }
-}
-
-func TestTimstamp_MarshalReflexivity(t *testing.T) {
- testCases := []struct {
- desc string
- data Timestamp
- }{
- {"Reference", Timestamp{referenceTime}},
- {"Empty", Timestamp{}},
- }
- for _, tc := range testCases {
- data, err := json.Marshal(tc.data)
- if err != nil {
- t.Errorf("%s: Marshal err=%v", tc.desc, err)
- }
- var got Timestamp
- err = json.Unmarshal(data, &got)
- if !got.Equal(tc.data) {
- t.Errorf("%s: %+v != %+v", tc.desc, got, data)
- }
- }
-}
-
-type WrappedTimestamp struct {
- A int
- Time Timestamp
-}
-
-func TestWrappedTimstamp_Marshal(t *testing.T) {
- testCases := []struct {
- desc string
- data WrappedTimestamp
- want string
- wantErr bool
- equal bool
- }{
- {"Reference", WrappedTimestamp{0, Timestamp{referenceTime}}, fmt.Sprintf(`{"A":0,"Time":%s}`, referenceTimeStr), false, true},
- {"Empty", WrappedTimestamp{}, fmt.Sprintf(`{"A":0,"Time":%s}`, emptyTimeStr), false, true},
- {"Mismatch", WrappedTimestamp{}, fmt.Sprintf(`{"A":0,"Time":%s}`, referenceTimeStr), false, false},
- }
- for _, tc := range testCases {
- out, err := json.Marshal(tc.data)
- if gotErr := err != nil; gotErr != tc.wantErr {
- t.Errorf("%s: gotErr=%v, wantErr=%v, err=%v", tc.desc, gotErr, tc.wantErr, err)
- }
- got := string(out)
- equal := got == tc.want
- if equal != tc.equal {
- t.Errorf("%s: got=%s, want=%s, equal=%v, want=%v", tc.desc, got, tc.want, equal, tc.equal)
- }
- }
-}
-
-func TestWrappedTimstamp_Unmarshal(t *testing.T) {
- testCases := []struct {
- desc string
- data string
- want WrappedTimestamp
- wantErr bool
- equal bool
- }{
- {"Reference", referenceTimeStr, WrappedTimestamp{0, Timestamp{referenceTime}}, false, true},
- {"ReferenceUnix", referenceUnixTimeStr, WrappedTimestamp{0, Timestamp{referenceTime}}, false, true},
- {"Empty", emptyTimeStr, WrappedTimestamp{0, Timestamp{}}, false, true},
- {"UnixStart", `0`, WrappedTimestamp{0, Timestamp{unixOrigin}}, false, true},
- {"Mismatch", referenceTimeStr, WrappedTimestamp{0, Timestamp{}}, false, false},
- {"MismatchUnix", `0`, WrappedTimestamp{0, Timestamp{}}, false, false},
- {"Invalid", `"asdf"`, WrappedTimestamp{0, Timestamp{referenceTime}}, true, false},
- }
- for _, tc := range testCases {
- var got Timestamp
- err := json.Unmarshal([]byte(tc.data), &got)
- if gotErr := err != nil; gotErr != tc.wantErr {
- t.Errorf("%s: gotErr=%v, wantErr=%v, err=%v", tc.desc, gotErr, tc.wantErr, err)
- continue
- }
- equal := got.Time.Equal(tc.want.Time.Time)
- if equal != tc.equal {
- t.Errorf("%s: got=%#v, want=%#v, equal=%v, want=%v", tc.desc, got, tc.want, equal, tc.equal)
- }
- }
-}
-
-func TestWrappedTimstamp_MarshalReflexivity(t *testing.T) {
- testCases := []struct {
- desc string
- data WrappedTimestamp
- }{
- {"Reference", WrappedTimestamp{0, Timestamp{referenceTime}}},
- {"Empty", WrappedTimestamp{0, Timestamp{}}},
- }
- for _, tc := range testCases {
- bytes, err := json.Marshal(tc.data)
- if err != nil {
- t.Errorf("%s: Marshal err=%v", tc.desc, err)
- }
- var got WrappedTimestamp
- err = json.Unmarshal(bytes, &got)
- if !got.Time.Equal(tc.data.Time) {
- t.Errorf("%s: %+v != %+v", tc.desc, got, tc.data)
- }
- }
-}
diff --git a/vendor/src/github.com/google/go-github/github/users.go b/vendor/src/github.com/google/go-github/github/users.go
deleted file mode 100644
index 8f63746..0000000
--- a/vendor/src/github.com/google/go-github/github/users.go
+++ /dev/null
@@ -1,222 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import "fmt"
-
-// UsersService handles communication with the user related
-// methods of the GitHub API.
-//
-// GitHub API docs: http://developer.github.com/v3/users/
-type UsersService service
-
-// User represents a GitHub user.
-type User struct {
- Login *string `json:"login,omitempty"`
- ID *int `json:"id,omitempty"`
- AvatarURL *string `json:"avatar_url,omitempty"`
- HTMLURL *string `json:"html_url,omitempty"`
- GravatarID *string `json:"gravatar_id,omitempty"`
- Name *string `json:"name,omitempty"`
- Company *string `json:"company,omitempty"`
- Blog *string `json:"blog,omitempty"`
- Location *string `json:"location,omitempty"`
- Email *string `json:"email,omitempty"`
- Hireable *bool `json:"hireable,omitempty"`
- Bio *string `json:"bio,omitempty"`
- PublicRepos *int `json:"public_repos,omitempty"`
- PublicGists *int `json:"public_gists,omitempty"`
- Followers *int `json:"followers,omitempty"`
- Following *int `json:"following,omitempty"`
- CreatedAt *Timestamp `json:"created_at,omitempty"`
- UpdatedAt *Timestamp `json:"updated_at,omitempty"`
- SuspendedAt *Timestamp `json:"suspended_at,omitempty"`
- Type *string `json:"type,omitempty"`
- SiteAdmin *bool `json:"site_admin,omitempty"`
- TotalPrivateRepos *int `json:"total_private_repos,omitempty"`
- OwnedPrivateRepos *int `json:"owned_private_repos,omitempty"`
- PrivateGists *int `json:"private_gists,omitempty"`
- DiskUsage *int `json:"disk_usage,omitempty"`
- Collaborators *int `json:"collaborators,omitempty"`
- Plan *Plan `json:"plan,omitempty"`
-
- // API URLs
- URL *string `json:"url,omitempty"`
- EventsURL *string `json:"events_url,omitempty"`
- FollowingURL *string `json:"following_url,omitempty"`
- FollowersURL *string `json:"followers_url,omitempty"`
- GistsURL *string `json:"gists_url,omitempty"`
- OrganizationsURL *string `json:"organizations_url,omitempty"`
- ReceivedEventsURL *string `json:"received_events_url,omitempty"`
- ReposURL *string `json:"repos_url,omitempty"`
- StarredURL *string `json:"starred_url,omitempty"`
- SubscriptionsURL *string `json:"subscriptions_url,omitempty"`
-
- // TextMatches is only populated from search results that request text matches
- // See: search.go and https://developer.github.com/v3/search/#text-match-metadata
- TextMatches []TextMatch `json:"text_matches,omitempty"`
-
- // Permissions identifies the permissions that a user has on a given
- // repository. This is only populated when calling Repositories.ListCollaborators.
- Permissions *map[string]bool `json:"permissions,omitempty"`
-}
-
-func (u User) String() string {
- return Stringify(u)
-}
-
-// Get fetches a user. Passing the empty string will fetch the authenticated
-// user.
-//
-// GitHub API docs: http://developer.github.com/v3/users/#get-a-single-user
-func (s *UsersService) Get(user string) (*User, *Response, error) {
- var u string
- if user != "" {
- u = fmt.Sprintf("users/%v", user)
- } else {
- u = "user"
- }
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- uResp := new(User)
- resp, err := s.client.Do(req, uResp)
- if err != nil {
- return nil, resp, err
- }
-
- return uResp, resp, err
-}
-
-// GetByID fetches a user.
-//
-// Note: GetByID uses the undocumented GitHub API endpoint /user/:id.
-func (s *UsersService) GetByID(id int) (*User, *Response, error) {
- u := fmt.Sprintf("user/%d", id)
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- user := new(User)
- resp, err := s.client.Do(req, user)
- if err != nil {
- return nil, resp, err
- }
-
- return user, resp, err
-}
-
-// Edit the authenticated user.
-//
-// GitHub API docs: http://developer.github.com/v3/users/#update-the-authenticated-user
-func (s *UsersService) Edit(user *User) (*User, *Response, error) {
- u := "user"
- req, err := s.client.NewRequest("PATCH", u, user)
- if err != nil {
- return nil, nil, err
- }
-
- uResp := new(User)
- resp, err := s.client.Do(req, uResp)
- if err != nil {
- return nil, resp, err
- }
-
- return uResp, resp, err
-}
-
-// UserListOptions specifies optional parameters to the UsersService.ListAll
-// method.
-type UserListOptions struct {
- // ID of the last user seen
- Since int `url:"since,omitempty"`
-
- ListOptions
-}
-
-// ListAll lists all GitHub users.
-//
-// To paginate through all users, populate 'Since' with the ID of the last user.
-//
-// GitHub API docs: http://developer.github.com/v3/users/#get-all-users
-func (s *UsersService) ListAll(opt *UserListOptions) ([]*User, *Response, error) {
- u, err := addOptions("users", opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- users := new([]*User)
- resp, err := s.client.Do(req, users)
- if err != nil {
- return nil, resp, err
- }
-
- return *users, resp, err
-}
-
-// ListInvitations lists all currently-open repository invitations for the
-// authenticated user.
-//
-// GitHub API docs: https://developer.github.com/v3/repos/invitations/#list-a-users-repository-invitations
-func (s *UsersService) ListInvitations() ([]*RepositoryInvitation, *Response, error) {
- req, err := s.client.NewRequest("GET", "user/repository_invitations", nil)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeRepositoryInvitationsPreview)
-
- invites := []*RepositoryInvitation{}
- resp, err := s.client.Do(req, &invites)
- if err != nil {
- return nil, resp, err
- }
-
- return invites, resp, err
-}
-
-// AcceptInvitation accepts the currently-open repository invitation for the
-// authenticated user.
-//
-// GitHub API docs: https://developer.github.com/v3/repos/invitations/#accept-a-repository-invitation
-func (s *UsersService) AcceptInvitation(invitationID int) (*Response, error) {
- u := fmt.Sprintf("user/repository_invitations/%v", invitationID)
- req, err := s.client.NewRequest("PATCH", u, nil)
- if err != nil {
- return nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeRepositoryInvitationsPreview)
-
- return s.client.Do(req, nil)
-}
-
-// DeclineInvitation declines the currently-open repository invitation for the
-// authenticated user.
-//
-// GitHub API docs: https://developer.github.com/v3/repos/invitations/#decline-a-repository-invitation
-func (s *UsersService) DeclineInvitation(invitationID int) (*Response, error) {
- u := fmt.Sprintf("user/repository_invitations/%v", invitationID)
- req, err := s.client.NewRequest("DELETE", u, nil)
- if err != nil {
- return nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeRepositoryInvitationsPreview)
-
- return s.client.Do(req, nil)
-}
diff --git a/vendor/src/github.com/google/go-github/github/users_administration.go b/vendor/src/github.com/google/go-github/github/users_administration.go
deleted file mode 100644
index dc1dcb8..0000000
--- a/vendor/src/github.com/google/go-github/github/users_administration.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2014 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import "fmt"
-
-// PromoteSiteAdmin promotes a user to a site administrator of a GitHub Enterprise instance.
-//
-// GitHub API docs: https://developer.github.com/v3/users/administration/#promote-an-ordinary-user-to-a-site-administrator
-func (s *UsersService) PromoteSiteAdmin(user string) (*Response, error) {
- u := fmt.Sprintf("users/%v/site_admin", user)
-
- req, err := s.client.NewRequest("PUT", u, nil)
- if err != nil {
- return nil, err
- }
-
- return s.client.Do(req, nil)
-}
-
-// DemoteSiteAdmin demotes a user from site administrator of a GitHub Enterprise instance.
-//
-// GitHub API docs: https://developer.github.com/v3/users/administration/#demote-a-site-administrator-to-an-ordinary-user
-func (s *UsersService) DemoteSiteAdmin(user string) (*Response, error) {
- u := fmt.Sprintf("users/%v/site_admin", user)
-
- req, err := s.client.NewRequest("DELETE", u, nil)
- if err != nil {
- return nil, err
- }
-
- return s.client.Do(req, nil)
-}
-
-// Suspend a user on a GitHub Enterprise instance.
-//
-// GitHub API docs: https://developer.github.com/v3/users/administration/#suspend-a-user
-func (s *UsersService) Suspend(user string) (*Response, error) {
- u := fmt.Sprintf("users/%v/suspended", user)
-
- req, err := s.client.NewRequest("PUT", u, nil)
- if err != nil {
- return nil, err
- }
-
- return s.client.Do(req, nil)
-}
-
-// Unsuspend a user on a GitHub Enterprise instance.
-//
-// GitHub API docs: https://developer.github.com/v3/users/administration/#unsuspend-a-user
-func (s *UsersService) Unsuspend(user string) (*Response, error) {
- u := fmt.Sprintf("users/%v/suspended", user)
-
- req, err := s.client.NewRequest("DELETE", u, nil)
- if err != nil {
- return nil, err
- }
-
- return s.client.Do(req, nil)
-}
diff --git a/vendor/src/github.com/google/go-github/github/users_administration_test.go b/vendor/src/github.com/google/go-github/github/users_administration_test.go
deleted file mode 100644
index d415f4d..0000000
--- a/vendor/src/github.com/google/go-github/github/users_administration_test.go
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright 2014 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "net/http"
- "testing"
-)
-
-func TestUsersService_PromoteSiteAdmin(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/users/u/site_admin", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "PUT")
- w.WriteHeader(http.StatusNoContent)
- })
-
- _, err := client.Users.PromoteSiteAdmin("u")
- if err != nil {
- t.Errorf("Users.PromoteSiteAdmin returned error: %v", err)
- }
-}
-
-func TestUsersService_DemoteSiteAdmin(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/users/u/site_admin", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "DELETE")
- w.WriteHeader(http.StatusNoContent)
- })
-
- _, err := client.Users.DemoteSiteAdmin("u")
- if err != nil {
- t.Errorf("Users.DemoteSiteAdmin returned error: %v", err)
- }
-}
-
-func TestUsersService_Suspend(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/users/u/suspended", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "PUT")
- w.WriteHeader(http.StatusNoContent)
- })
-
- _, err := client.Users.Suspend("u")
- if err != nil {
- t.Errorf("Users.Suspend returned error: %v", err)
- }
-}
-
-func TestUsersService_Unsuspend(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/users/u/suspended", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "DELETE")
- w.WriteHeader(http.StatusNoContent)
- })
-
- _, err := client.Users.Unsuspend("u")
- if err != nil {
- t.Errorf("Users.Unsuspend returned error: %v", err)
- }
-}
diff --git a/vendor/src/github.com/google/go-github/github/users_emails.go b/vendor/src/github.com/google/go-github/github/users_emails.go
deleted file mode 100644
index e4a5898..0000000
--- a/vendor/src/github.com/google/go-github/github/users_emails.go
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-// UserEmail represents user's email address
-type UserEmail struct {
- Email *string `json:"email,omitempty"`
- Primary *bool `json:"primary,omitempty"`
- Verified *bool `json:"verified,omitempty"`
-}
-
-// ListEmails lists all email addresses for the authenticated user.
-//
-// GitHub API docs: http://developer.github.com/v3/users/emails/#list-email-addresses-for-a-user
-func (s *UsersService) ListEmails(opt *ListOptions) ([]*UserEmail, *Response, error) {
- u := "user/emails"
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- emails := new([]*UserEmail)
- resp, err := s.client.Do(req, emails)
- if err != nil {
- return nil, resp, err
- }
-
- return *emails, resp, err
-}
-
-// AddEmails adds email addresses of the authenticated user.
-//
-// GitHub API docs: http://developer.github.com/v3/users/emails/#add-email-addresses
-func (s *UsersService) AddEmails(emails []string) ([]*UserEmail, *Response, error) {
- u := "user/emails"
- req, err := s.client.NewRequest("POST", u, emails)
- if err != nil {
- return nil, nil, err
- }
-
- e := new([]*UserEmail)
- resp, err := s.client.Do(req, e)
- if err != nil {
- return nil, resp, err
- }
-
- return *e, resp, err
-}
-
-// DeleteEmails deletes email addresses from authenticated user.
-//
-// GitHub API docs: http://developer.github.com/v3/users/emails/#delete-email-addresses
-func (s *UsersService) DeleteEmails(emails []string) (*Response, error) {
- u := "user/emails"
- req, err := s.client.NewRequest("DELETE", u, emails)
- if err != nil {
- return nil, err
- }
-
- return s.client.Do(req, nil)
-}
diff --git a/vendor/src/github.com/google/go-github/github/users_emails_test.go b/vendor/src/github.com/google/go-github/github/users_emails_test.go
deleted file mode 100644
index 13a444f..0000000
--- a/vendor/src/github.com/google/go-github/github/users_emails_test.go
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "encoding/json"
- "fmt"
- "net/http"
- "reflect"
- "testing"
-)
-
-func TestUsersService_ListEmails(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/user/emails", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testFormValues(t, r, values{"page": "2"})
- fmt.Fprint(w, `[{
- "email": "user@example.com",
- "verified": false,
- "primary": true
- }]`)
- })
-
- opt := &ListOptions{Page: 2}
- emails, _, err := client.Users.ListEmails(opt)
- if err != nil {
- t.Errorf("Users.ListEmails returned error: %v", err)
- }
-
- want := []*UserEmail{{Email: String("user@example.com"), Verified: Bool(false), Primary: Bool(true)}}
- if !reflect.DeepEqual(emails, want) {
- t.Errorf("Users.ListEmails returned %+v, want %+v", emails, want)
- }
-}
-
-func TestUsersService_AddEmails(t *testing.T) {
- setup()
- defer teardown()
-
- input := []string{"new@example.com"}
-
- mux.HandleFunc("/user/emails", func(w http.ResponseWriter, r *http.Request) {
- v := new([]string)
- json.NewDecoder(r.Body).Decode(v)
-
- testMethod(t, r, "POST")
- if !reflect.DeepEqual(*v, input) {
- t.Errorf("Request body = %+v, want %+v", *v, input)
- }
-
- fmt.Fprint(w, `[{"email":"old@example.com"}, {"email":"new@example.com"}]`)
- })
-
- emails, _, err := client.Users.AddEmails(input)
- if err != nil {
- t.Errorf("Users.AddEmails returned error: %v", err)
- }
-
- want := []*UserEmail{
- {Email: String("old@example.com")},
- {Email: String("new@example.com")},
- }
- if !reflect.DeepEqual(emails, want) {
- t.Errorf("Users.AddEmails returned %+v, want %+v", emails, want)
- }
-}
-
-func TestUsersService_DeleteEmails(t *testing.T) {
- setup()
- defer teardown()
-
- input := []string{"user@example.com"}
-
- mux.HandleFunc("/user/emails", func(w http.ResponseWriter, r *http.Request) {
- v := new([]string)
- json.NewDecoder(r.Body).Decode(v)
-
- testMethod(t, r, "DELETE")
- if !reflect.DeepEqual(*v, input) {
- t.Errorf("Request body = %+v, want %+v", *v, input)
- }
- })
-
- _, err := client.Users.DeleteEmails(input)
- if err != nil {
- t.Errorf("Users.DeleteEmails returned error: %v", err)
- }
-}
diff --git a/vendor/src/github.com/google/go-github/github/users_followers.go b/vendor/src/github.com/google/go-github/github/users_followers.go
deleted file mode 100644
index 38a1662..0000000
--- a/vendor/src/github.com/google/go-github/github/users_followers.go
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import "fmt"
-
-// ListFollowers lists the followers for a user. Passing the empty string will
-// fetch followers for the authenticated user.
-//
-// GitHub API docs: http://developer.github.com/v3/users/followers/#list-followers-of-a-user
-func (s *UsersService) ListFollowers(user string, opt *ListOptions) ([]*User, *Response, error) {
- var u string
- if user != "" {
- u = fmt.Sprintf("users/%v/followers", user)
- } else {
- u = "user/followers"
- }
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- users := new([]*User)
- resp, err := s.client.Do(req, users)
- if err != nil {
- return nil, resp, err
- }
-
- return *users, resp, err
-}
-
-// ListFollowing lists the people that a user is following. Passing the empty
-// string will list people the authenticated user is following.
-//
-// GitHub API docs: http://developer.github.com/v3/users/followers/#list-users-followed-by-another-user
-func (s *UsersService) ListFollowing(user string, opt *ListOptions) ([]*User, *Response, error) {
- var u string
- if user != "" {
- u = fmt.Sprintf("users/%v/following", user)
- } else {
- u = "user/following"
- }
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- users := new([]*User)
- resp, err := s.client.Do(req, users)
- if err != nil {
- return nil, resp, err
- }
-
- return *users, resp, err
-}
-
-// IsFollowing checks if "user" is following "target". Passing the empty
-// string for "user" will check if the authenticated user is following "target".
-//
-// GitHub API docs: http://developer.github.com/v3/users/followers/#check-if-you-are-following-a-user
-func (s *UsersService) IsFollowing(user, target string) (bool, *Response, error) {
- var u string
- if user != "" {
- u = fmt.Sprintf("users/%v/following/%v", user, target)
- } else {
- u = fmt.Sprintf("user/following/%v", target)
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return false, nil, err
- }
-
- resp, err := s.client.Do(req, nil)
- following, err := parseBoolResponse(err)
- return following, resp, err
-}
-
-// Follow will cause the authenticated user to follow the specified user.
-//
-// GitHub API docs: http://developer.github.com/v3/users/followers/#follow-a-user
-func (s *UsersService) Follow(user string) (*Response, error) {
- u := fmt.Sprintf("user/following/%v", user)
- req, err := s.client.NewRequest("PUT", u, nil)
- if err != nil {
- return nil, err
- }
-
- return s.client.Do(req, nil)
-}
-
-// Unfollow will cause the authenticated user to unfollow the specified user.
-//
-// GitHub API docs: http://developer.github.com/v3/users/followers/#unfollow-a-user
-func (s *UsersService) Unfollow(user string) (*Response, error) {
- u := fmt.Sprintf("user/following/%v", user)
- req, err := s.client.NewRequest("DELETE", u, nil)
- if err != nil {
- return nil, err
- }
-
- return s.client.Do(req, nil)
-}
diff --git a/vendor/src/github.com/google/go-github/github/users_followers_test.go b/vendor/src/github.com/google/go-github/github/users_followers_test.go
deleted file mode 100644
index 0447721..0000000
--- a/vendor/src/github.com/google/go-github/github/users_followers_test.go
+++ /dev/null
@@ -1,222 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "fmt"
- "net/http"
- "reflect"
- "testing"
-)
-
-func TestUsersService_ListFollowers_authenticatedUser(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/user/followers", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testFormValues(t, r, values{"page": "2"})
- fmt.Fprint(w, `[{"id":1}]`)
- })
-
- opt := &ListOptions{Page: 2}
- users, _, err := client.Users.ListFollowers("", opt)
- if err != nil {
- t.Errorf("Users.ListFollowers returned error: %v", err)
- }
-
- want := []*User{{ID: Int(1)}}
- if !reflect.DeepEqual(users, want) {
- t.Errorf("Users.ListFollowers returned %+v, want %+v", users, want)
- }
-}
-
-func TestUsersService_ListFollowers_specifiedUser(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/users/u/followers", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- fmt.Fprint(w, `[{"id":1}]`)
- })
-
- users, _, err := client.Users.ListFollowers("u", nil)
- if err != nil {
- t.Errorf("Users.ListFollowers returned error: %v", err)
- }
-
- want := []*User{{ID: Int(1)}}
- if !reflect.DeepEqual(users, want) {
- t.Errorf("Users.ListFollowers returned %+v, want %+v", users, want)
- }
-}
-
-func TestUsersService_ListFollowers_invalidUser(t *testing.T) {
- _, _, err := client.Users.ListFollowers("%", nil)
- testURLParseError(t, err)
-}
-
-func TestUsersService_ListFollowing_authenticatedUser(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/user/following", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testFormValues(t, r, values{"page": "2"})
- fmt.Fprint(w, `[{"id":1}]`)
- })
-
- opts := &ListOptions{Page: 2}
- users, _, err := client.Users.ListFollowing("", opts)
- if err != nil {
- t.Errorf("Users.ListFollowing returned error: %v", err)
- }
-
- want := []*User{{ID: Int(1)}}
- if !reflect.DeepEqual(users, want) {
- t.Errorf("Users.ListFollowing returned %+v, want %+v", users, want)
- }
-}
-
-func TestUsersService_ListFollowing_specifiedUser(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/users/u/following", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- fmt.Fprint(w, `[{"id":1}]`)
- })
-
- users, _, err := client.Users.ListFollowing("u", nil)
- if err != nil {
- t.Errorf("Users.ListFollowing returned error: %v", err)
- }
-
- want := []*User{{ID: Int(1)}}
- if !reflect.DeepEqual(users, want) {
- t.Errorf("Users.ListFollowing returned %+v, want %+v", users, want)
- }
-}
-
-func TestUsersService_ListFollowing_invalidUser(t *testing.T) {
- _, _, err := client.Users.ListFollowing("%", nil)
- testURLParseError(t, err)
-}
-
-func TestUsersService_IsFollowing_authenticatedUser(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/user/following/t", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- w.WriteHeader(http.StatusNoContent)
- })
-
- following, _, err := client.Users.IsFollowing("", "t")
- if err != nil {
- t.Errorf("Users.IsFollowing returned error: %v", err)
- }
- if want := true; following != want {
- t.Errorf("Users.IsFollowing returned %+v, want %+v", following, want)
- }
-}
-
-func TestUsersService_IsFollowing_specifiedUser(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/users/u/following/t", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- w.WriteHeader(http.StatusNoContent)
- })
-
- following, _, err := client.Users.IsFollowing("u", "t")
- if err != nil {
- t.Errorf("Users.IsFollowing returned error: %v", err)
- }
- if want := true; following != want {
- t.Errorf("Users.IsFollowing returned %+v, want %+v", following, want)
- }
-}
-
-func TestUsersService_IsFollowing_false(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/users/u/following/t", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- w.WriteHeader(http.StatusNotFound)
- })
-
- following, _, err := client.Users.IsFollowing("u", "t")
- if err != nil {
- t.Errorf("Users.IsFollowing returned error: %v", err)
- }
- if want := false; following != want {
- t.Errorf("Users.IsFollowing returned %+v, want %+v", following, want)
- }
-}
-
-func TestUsersService_IsFollowing_error(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/users/u/following/t", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- http.Error(w, "BadRequest", http.StatusBadRequest)
- })
-
- following, _, err := client.Users.IsFollowing("u", "t")
- if err == nil {
- t.Errorf("Expected HTTP 400 response")
- }
- if want := false; following != want {
- t.Errorf("Users.IsFollowing returned %+v, want %+v", following, want)
- }
-}
-
-func TestUsersService_IsFollowing_invalidUser(t *testing.T) {
- _, _, err := client.Users.IsFollowing("%", "%")
- testURLParseError(t, err)
-}
-
-func TestUsersService_Follow(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/user/following/u", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "PUT")
- })
-
- _, err := client.Users.Follow("u")
- if err != nil {
- t.Errorf("Users.Follow returned error: %v", err)
- }
-}
-
-func TestUsersService_Follow_invalidUser(t *testing.T) {
- _, err := client.Users.Follow("%")
- testURLParseError(t, err)
-}
-
-func TestUsersService_Unfollow(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/user/following/u", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "DELETE")
- })
-
- _, err := client.Users.Unfollow("u")
- if err != nil {
- t.Errorf("Users.Follow returned error: %v", err)
- }
-}
-
-func TestUsersService_Unfollow_invalidUser(t *testing.T) {
- _, err := client.Users.Unfollow("%")
- testURLParseError(t, err)
-}
diff --git a/vendor/src/github.com/google/go-github/github/users_gpg_keys.go b/vendor/src/github.com/google/go-github/github/users_gpg_keys.go
deleted file mode 100644
index 08cfbed..0000000
--- a/vendor/src/github.com/google/go-github/github/users_gpg_keys.go
+++ /dev/null
@@ -1,127 +0,0 @@
-// Copyright 2016 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "fmt"
- "time"
-)
-
-// GPGKey represents a GitHub user's public GPG key used to verify GPG signed commits and tags.
-//
-// https://developer.github.com/changes/2016-04-04-git-signing-api-preview/
-type GPGKey struct {
- ID *int `json:"id,omitempty"`
- PrimaryKeyID *int `json:"primary_key_id,omitempty"`
- KeyID *string `json:"key_id,omitempty"`
- PublicKey *string `json:"public_key,omitempty"`
- Emails []GPGEmail `json:"emails,omitempty"`
- Subkeys []GPGKey `json:"subkeys,omitempty"`
- CanSign *bool `json:"can_sign,omitempty"`
- CanEncryptComms *bool `json:"can_encrypt_comms,omitempty"`
- CanEncryptStorage *bool `json:"can_encrypt_storage,omitempty"`
- CanCertify *bool `json:"can_certify,omitempty"`
- CreatedAt *time.Time `json:"created_at,omitempty"`
- ExpiresAt *time.Time `json:"expires_at,omitempty"`
-}
-
-// String stringifies a GPGKey.
-func (k GPGKey) String() string {
- return Stringify(k)
-}
-
-// GPGEmail represents an email address associated to a GPG key.
-type GPGEmail struct {
- Email *string `json:"email,omitempty"`
- Verified *bool `json:"verified,omitempty"`
-}
-
-// ListGPGKeys lists the current user's GPG keys. It requires authentication
-// via Basic Auth or via OAuth with at least read:gpg_key scope.
-//
-// GitHub API docs: https://developer.github.com/v3/users/gpg_keys/#list-your-gpg-keys
-func (s *UsersService) ListGPGKeys() ([]*GPGKey, *Response, error) {
- req, err := s.client.NewRequest("GET", "user/gpg_keys", nil)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeGitSigningPreview)
-
- var keys []*GPGKey
- resp, err := s.client.Do(req, &keys)
- if err != nil {
- return nil, resp, err
- }
-
- return keys, resp, err
-}
-
-// GetGPGKey gets extended details for a single GPG key. It requires authentication
-// via Basic Auth or via OAuth with at least read:gpg_key scope.
-//
-// GitHub API docs: https://developer.github.com/v3/users/gpg_keys/#get-a-single-gpg-key
-func (s *UsersService) GetGPGKey(id int) (*GPGKey, *Response, error) {
- u := fmt.Sprintf("user/gpg_keys/%v", id)
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeGitSigningPreview)
-
- key := &GPGKey{}
- resp, err := s.client.Do(req, key)
- if err != nil {
- return nil, resp, err
- }
-
- return key, resp, err
-}
-
-// CreateGPGKey creates a GPG key. It requires authenticatation via Basic Auth
-// or OAuth with at least write:gpg_key scope.
-//
-// GitHub API docs: https://developer.github.com/v3/users/gpg_keys/#create-a-gpg-key
-func (s *UsersService) CreateGPGKey(armoredPublicKey string) (*GPGKey, *Response, error) {
- gpgKey := &struct {
- ArmoredPublicKey string `json:"armored_public_key"`
- }{ArmoredPublicKey: armoredPublicKey}
- req, err := s.client.NewRequest("POST", "user/gpg_keys", gpgKey)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeGitSigningPreview)
-
- key := &GPGKey{}
- resp, err := s.client.Do(req, key)
- if err != nil {
- return nil, resp, err
- }
-
- return key, resp, err
-}
-
-// DeleteGPGKey deletes a GPG key. It requires authentication via Basic Auth or
-// via OAuth with at least admin:gpg_key scope.
-//
-// GitHub API docs: https://developer.github.com/v3/users/gpg_keys/#delete-a-gpg-key
-func (s *UsersService) DeleteGPGKey(id int) (*Response, error) {
- u := fmt.Sprintf("user/gpg_keys/%v", id)
- req, err := s.client.NewRequest("DELETE", u, nil)
- if err != nil {
- return nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeGitSigningPreview)
-
- return s.client.Do(req, nil)
-}
diff --git a/vendor/src/github.com/google/go-github/github/users_gpg_keys_test.go b/vendor/src/github.com/google/go-github/github/users_gpg_keys_test.go
deleted file mode 100644
index 05ef23f..0000000
--- a/vendor/src/github.com/google/go-github/github/users_gpg_keys_test.go
+++ /dev/null
@@ -1,110 +0,0 @@
-// Copyright 2016 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "encoding/json"
- "fmt"
- "net/http"
- "reflect"
- "testing"
-)
-
-func TestUsersService_ListGPGKeys(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/user/gpg_keys", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testHeader(t, r, "Accept", mediaTypeGitSigningPreview)
- fmt.Fprint(w, `[{"id":1,"primary_key_id":2}]`)
- })
-
- keys, _, err := client.Users.ListGPGKeys()
- if err != nil {
- t.Errorf("Users.ListGPGKeys returned error: %v", err)
- }
-
- want := []*GPGKey{{ID: Int(1), PrimaryKeyID: Int(2)}}
- if !reflect.DeepEqual(keys, want) {
- t.Errorf("Users.ListGPGKeys = %+v, want %+v", keys, want)
- }
-}
-
-func TestUsersService_GetGPGKey(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/user/gpg_keys/1", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testHeader(t, r, "Accept", mediaTypeGitSigningPreview)
- fmt.Fprint(w, `{"id":1}`)
- })
-
- key, _, err := client.Users.GetGPGKey(1)
- if err != nil {
- t.Errorf("Users.GetGPGKey returned error: %v", err)
- }
-
- want := &GPGKey{ID: Int(1)}
- if !reflect.DeepEqual(key, want) {
- t.Errorf("Users.GetGPGKey = %+v, want %+v", key, want)
- }
-}
-
-func TestUsersService_CreateGPGKey(t *testing.T) {
- setup()
- defer teardown()
-
- input := `
------BEGIN PGP PUBLIC KEY BLOCK-----
-Comment: GPGTools - https://gpgtools.org
-
-mQINBFcEd9kBEACo54TDbGhKlXKWMvJgecEUKPPcv7XdnpKdGb3LRw5MvFwT0V0f
-...
-=tqfb
------END PGP PUBLIC KEY BLOCK-----`
-
- mux.HandleFunc("/user/gpg_keys", func(w http.ResponseWriter, r *http.Request) {
- var gpgKey struct {
- ArmoredPublicKey *string `json:"armored_public_key,omitempty"`
- }
- json.NewDecoder(r.Body).Decode(&gpgKey)
-
- testMethod(t, r, "POST")
- testHeader(t, r, "Accept", mediaTypeGitSigningPreview)
- if gpgKey.ArmoredPublicKey == nil || *gpgKey.ArmoredPublicKey != input {
- t.Errorf("gpgKey = %+v, want %q", gpgKey, input)
- }
-
- fmt.Fprint(w, `{"id":1}`)
- })
-
- gpgKey, _, err := client.Users.CreateGPGKey(input)
- if err != nil {
- t.Errorf("Users.GetGPGKey returned error: %v", err)
- }
-
- want := &GPGKey{ID: Int(1)}
- if !reflect.DeepEqual(gpgKey, want) {
- t.Errorf("Users.GetGPGKey = %+v, want %+v", gpgKey, want)
- }
-}
-
-func TestUsersService_DeleteGPGKey(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/user/gpg_keys/1", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "DELETE")
- testHeader(t, r, "Accept", mediaTypeGitSigningPreview)
- })
-
- _, err := client.Users.DeleteGPGKey(1)
- if err != nil {
- t.Errorf("Users.DeleteGPGKey returned error: %v", err)
- }
-}
diff --git a/vendor/src/github.com/google/go-github/github/users_keys.go b/vendor/src/github.com/google/go-github/github/users_keys.go
deleted file mode 100644
index e4c255f..0000000
--- a/vendor/src/github.com/google/go-github/github/users_keys.go
+++ /dev/null
@@ -1,105 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import "fmt"
-
-// Key represents a public SSH key used to authenticate a user or deploy script.
-type Key struct {
- ID *int `json:"id,omitempty"`
- Key *string `json:"key,omitempty"`
- URL *string `json:"url,omitempty"`
- Title *string `json:"title,omitempty"`
- ReadOnly *bool `json:"read_only,omitempty"`
-}
-
-func (k Key) String() string {
- return Stringify(k)
-}
-
-// ListKeys lists the verified public keys for a user. Passing the empty
-// string will fetch keys for the authenticated user.
-//
-// GitHub API docs: http://developer.github.com/v3/users/keys/#list-public-keys-for-a-user
-func (s *UsersService) ListKeys(user string, opt *ListOptions) ([]*Key, *Response, error) {
- var u string
- if user != "" {
- u = fmt.Sprintf("users/%v/keys", user)
- } else {
- u = "user/keys"
- }
- u, err := addOptions(u, opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- keys := new([]*Key)
- resp, err := s.client.Do(req, keys)
- if err != nil {
- return nil, resp, err
- }
-
- return *keys, resp, err
-}
-
-// GetKey fetches a single public key.
-//
-// GitHub API docs: http://developer.github.com/v3/users/keys/#get-a-single-public-key
-func (s *UsersService) GetKey(id int) (*Key, *Response, error) {
- u := fmt.Sprintf("user/keys/%v", id)
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- key := new(Key)
- resp, err := s.client.Do(req, key)
- if err != nil {
- return nil, resp, err
- }
-
- return key, resp, err
-}
-
-// CreateKey adds a public key for the authenticated user.
-//
-// GitHub API docs: http://developer.github.com/v3/users/keys/#create-a-public-key
-func (s *UsersService) CreateKey(key *Key) (*Key, *Response, error) {
- u := "user/keys"
-
- req, err := s.client.NewRequest("POST", u, key)
- if err != nil {
- return nil, nil, err
- }
-
- k := new(Key)
- resp, err := s.client.Do(req, k)
- if err != nil {
- return nil, resp, err
- }
-
- return k, resp, err
-}
-
-// DeleteKey deletes a public key.
-//
-// GitHub API docs: http://developer.github.com/v3/users/keys/#delete-a-public-key
-func (s *UsersService) DeleteKey(id int) (*Response, error) {
- u := fmt.Sprintf("user/keys/%v", id)
-
- req, err := s.client.NewRequest("DELETE", u, nil)
- if err != nil {
- return nil, err
- }
-
- return s.client.Do(req, nil)
-}
diff --git a/vendor/src/github.com/google/go-github/github/users_keys_test.go b/vendor/src/github.com/google/go-github/github/users_keys_test.go
deleted file mode 100644
index 25d4f0c..0000000
--- a/vendor/src/github.com/google/go-github/github/users_keys_test.go
+++ /dev/null
@@ -1,124 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "encoding/json"
- "fmt"
- "net/http"
- "reflect"
- "testing"
-)
-
-func TestUsersService_ListKeys_authenticatedUser(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/user/keys", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testFormValues(t, r, values{"page": "2"})
- fmt.Fprint(w, `[{"id":1}]`)
- })
-
- opt := &ListOptions{Page: 2}
- keys, _, err := client.Users.ListKeys("", opt)
- if err != nil {
- t.Errorf("Users.ListKeys returned error: %v", err)
- }
-
- want := []*Key{{ID: Int(1)}}
- if !reflect.DeepEqual(keys, want) {
- t.Errorf("Users.ListKeys returned %+v, want %+v", keys, want)
- }
-}
-
-func TestUsersService_ListKeys_specifiedUser(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/users/u/keys", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- fmt.Fprint(w, `[{"id":1}]`)
- })
-
- keys, _, err := client.Users.ListKeys("u", nil)
- if err != nil {
- t.Errorf("Users.ListKeys returned error: %v", err)
- }
-
- want := []*Key{{ID: Int(1)}}
- if !reflect.DeepEqual(keys, want) {
- t.Errorf("Users.ListKeys returned %+v, want %+v", keys, want)
- }
-}
-
-func TestUsersService_ListKeys_invalidUser(t *testing.T) {
- _, _, err := client.Users.ListKeys("%", nil)
- testURLParseError(t, err)
-}
-
-func TestUsersService_GetKey(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/user/keys/1", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- fmt.Fprint(w, `{"id":1}`)
- })
-
- key, _, err := client.Users.GetKey(1)
- if err != nil {
- t.Errorf("Users.GetKey returned error: %v", err)
- }
-
- want := &Key{ID: Int(1)}
- if !reflect.DeepEqual(key, want) {
- t.Errorf("Users.GetKey returned %+v, want %+v", key, want)
- }
-}
-
-func TestUsersService_CreateKey(t *testing.T) {
- setup()
- defer teardown()
-
- input := &Key{Key: String("k"), Title: String("t")}
-
- mux.HandleFunc("/user/keys", func(w http.ResponseWriter, r *http.Request) {
- v := new(Key)
- json.NewDecoder(r.Body).Decode(v)
-
- testMethod(t, r, "POST")
- if !reflect.DeepEqual(v, input) {
- t.Errorf("Request body = %+v, want %+v", v, input)
- }
-
- fmt.Fprint(w, `{"id":1}`)
- })
-
- key, _, err := client.Users.CreateKey(input)
- if err != nil {
- t.Errorf("Users.GetKey returned error: %v", err)
- }
-
- want := &Key{ID: Int(1)}
- if !reflect.DeepEqual(key, want) {
- t.Errorf("Users.GetKey returned %+v, want %+v", key, want)
- }
-}
-
-func TestUsersService_DeleteKey(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/user/keys/1", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "DELETE")
- })
-
- _, err := client.Users.DeleteKey(1)
- if err != nil {
- t.Errorf("Users.DeleteKey returned error: %v", err)
- }
-}
diff --git a/vendor/src/github.com/google/go-github/github/users_test.go b/vendor/src/github.com/google/go-github/github/users_test.go
deleted file mode 100644
index db72afa..0000000
--- a/vendor/src/github.com/google/go-github/github/users_test.go
+++ /dev/null
@@ -1,223 +0,0 @@
-// Copyright 2013 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import (
- "encoding/json"
- "fmt"
- "net/http"
- "reflect"
- "testing"
-)
-
-func TestUser_marshall(t *testing.T) {
- testJSONMarshal(t, &User{}, "{}")
-
- u := &User{
- Login: String("l"),
- ID: Int(1),
- URL: String("u"),
- AvatarURL: String("a"),
- GravatarID: String("g"),
- Name: String("n"),
- Company: String("c"),
- Blog: String("b"),
- Location: String("l"),
- Email: String("e"),
- Hireable: Bool(true),
- PublicRepos: Int(1),
- Followers: Int(1),
- Following: Int(1),
- CreatedAt: &Timestamp{referenceTime},
- SuspendedAt: &Timestamp{referenceTime},
- }
- want := `{
- "login": "l",
- "id": 1,
- "avatar_url": "a",
- "gravatar_id": "g",
- "name": "n",
- "company": "c",
- "blog": "b",
- "location": "l",
- "email": "e",
- "hireable": true,
- "public_repos": 1,
- "followers": 1,
- "following": 1,
- "created_at": ` + referenceTimeStr + `,
- "suspended_at": ` + referenceTimeStr + `,
- "url": "u"
- }`
- testJSONMarshal(t, u, want)
-}
-
-func TestUsersService_Get_authenticatedUser(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/user", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- fmt.Fprint(w, `{"id":1}`)
- })
-
- user, _, err := client.Users.Get("")
- if err != nil {
- t.Errorf("Users.Get returned error: %v", err)
- }
-
- want := &User{ID: Int(1)}
- if !reflect.DeepEqual(user, want) {
- t.Errorf("Users.Get returned %+v, want %+v", user, want)
- }
-}
-
-func TestUsersService_Get_specifiedUser(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/users/u", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- fmt.Fprint(w, `{"id":1}`)
- })
-
- user, _, err := client.Users.Get("u")
- if err != nil {
- t.Errorf("Users.Get returned error: %v", err)
- }
-
- want := &User{ID: Int(1)}
- if !reflect.DeepEqual(user, want) {
- t.Errorf("Users.Get returned %+v, want %+v", user, want)
- }
-}
-
-func TestUsersService_Get_invalidUser(t *testing.T) {
- _, _, err := client.Users.Get("%")
- testURLParseError(t, err)
-}
-
-func TestUsersService_GetByID(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/user/1", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- fmt.Fprint(w, `{"id":1}`)
- })
-
- user, _, err := client.Users.GetByID(1)
- if err != nil {
- t.Errorf("Users.GetByID returned error: %v", err)
- }
-
- want := &User{ID: Int(1)}
- if !reflect.DeepEqual(user, want) {
- t.Errorf("Users.GetByID returned %+v, want %+v", user, want)
- }
-}
-
-func TestUsersService_Edit(t *testing.T) {
- setup()
- defer teardown()
-
- input := &User{Name: String("n")}
-
- mux.HandleFunc("/user", func(w http.ResponseWriter, r *http.Request) {
- v := new(User)
- json.NewDecoder(r.Body).Decode(v)
-
- testMethod(t, r, "PATCH")
- if !reflect.DeepEqual(v, input) {
- t.Errorf("Request body = %+v, want %+v", v, input)
- }
-
- fmt.Fprint(w, `{"id":1}`)
- })
-
- user, _, err := client.Users.Edit(input)
- if err != nil {
- t.Errorf("Users.Edit returned error: %v", err)
- }
-
- want := &User{ID: Int(1)}
- if !reflect.DeepEqual(user, want) {
- t.Errorf("Users.Edit returned %+v, want %+v", user, want)
- }
-}
-
-func TestUsersService_ListAll(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/users", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testFormValues(t, r, values{"since": "1", "page": "2"})
- fmt.Fprint(w, `[{"id":2}]`)
- })
-
- opt := &UserListOptions{1, ListOptions{Page: 2}}
- users, _, err := client.Users.ListAll(opt)
- if err != nil {
- t.Errorf("Users.Get returned error: %v", err)
- }
-
- want := []*User{{ID: Int(2)}}
- if !reflect.DeepEqual(users, want) {
- t.Errorf("Users.ListAll returned %+v, want %+v", users, want)
- }
-}
-
-func TestUsersService_ListInvitations(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/user/repository_invitations", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "GET")
- testHeader(t, r, "Accept", mediaTypeRepositoryInvitationsPreview)
- fmt.Fprintf(w, `[{"id":1}, {"id":2}]`)
- })
-
- got, _, err := client.Users.ListInvitations()
- if err != nil {
- t.Errorf("Users.ListInvitations returned error: %v", err)
- }
-
- want := []*RepositoryInvitation{{ID: Int(1)}, {ID: Int(2)}}
- if !reflect.DeepEqual(got, want) {
- t.Errorf("Users.ListInvitations = %+v, want %+v", got, want)
- }
-}
-
-func TestUsersService_AcceptInvitation(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/user/repository_invitations/1", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "PATCH")
- testHeader(t, r, "Accept", mediaTypeRepositoryInvitationsPreview)
- w.WriteHeader(http.StatusNoContent)
- })
-
- if _, err := client.Users.AcceptInvitation(1); err != nil {
- t.Errorf("Users.AcceptInvitation returned error: %v", err)
- }
-}
-
-func TestUsersService_DeclineInvitation(t *testing.T) {
- setup()
- defer teardown()
-
- mux.HandleFunc("/user/repository_invitations/1", func(w http.ResponseWriter, r *http.Request) {
- testMethod(t, r, "DELETE")
- testHeader(t, r, "Accept", mediaTypeRepositoryInvitationsPreview)
- w.WriteHeader(http.StatusNoContent)
- })
-
- if _, err := client.Users.DeclineInvitation(1); err != nil {
- t.Errorf("Users.DeclineInvitation returned error: %v", err)
- }
-}
diff --git a/vendor/src/github.com/google/go-querystring/query/encode.go b/vendor/src/github.com/google/go-querystring/query/encode.go
deleted file mode 100644
index 19437b3..0000000
--- a/vendor/src/github.com/google/go-querystring/query/encode.go
+++ /dev/null
@@ -1,320 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package query implements encoding of structs into URL query parameters.
-//
-// As a simple example:
-//
-// type Options struct {
-// Query string `url:"q"`
-// ShowAll bool `url:"all"`
-// Page int `url:"page"`
-// }
-//
-// opt := Options{ "foo", true, 2 }
-// v, _ := query.Values(opt)
-// fmt.Print(v.Encode()) // will output: "q=foo&all=true&page=2"
-//
-// The exact mapping between Go values and url.Values is described in the
-// documentation for the Values() function.
-package query
-
-import (
- "bytes"
- "fmt"
- "net/url"
- "reflect"
- "strconv"
- "strings"
- "time"
-)
-
-var timeType = reflect.TypeOf(time.Time{})
-
-var encoderType = reflect.TypeOf(new(Encoder)).Elem()
-
-// Encoder is an interface implemented by any type that wishes to encode
-// itself into URL values in a non-standard way.
-type Encoder interface {
- EncodeValues(key string, v *url.Values) error
-}
-
-// Values returns the url.Values encoding of v.
-//
-// Values expects to be passed a struct, and traverses it recursively using the
-// following encoding rules.
-//
-// Each exported struct field is encoded as a URL parameter unless
-//
-// - the field's tag is "-", or
-// - the field is empty and its tag specifies the "omitempty" option
-//
-// The empty values are false, 0, any nil pointer or interface value, any array
-// slice, map, or string of length zero, and any time.Time that returns true
-// for IsZero().
-//
-// The URL parameter name defaults to the struct field name but can be
-// specified in the struct field's tag value. The "url" key in the struct
-// field's tag value is the key name, followed by an optional comma and
-// options. For example:
-//
-// // Field is ignored by this package.
-// Field int `url:"-"`
-//
-// // Field appears as URL parameter "myName".
-// Field int `url:"myName"`
-//
-// // Field appears as URL parameter "myName" and the field is omitted if
-// // its value is empty
-// Field int `url:"myName,omitempty"`
-//
-// // Field appears as URL parameter "Field" (the default), but the field
-// // is skipped if empty. Note the leading comma.
-// Field int `url:",omitempty"`
-//
-// For encoding individual field values, the following type-dependent rules
-// apply:
-//
-// Boolean values default to encoding as the strings "true" or "false".
-// Including the "int" option signals that the field should be encoded as the
-// strings "1" or "0".
-//
-// time.Time values default to encoding as RFC3339 timestamps. Including the
-// "unix" option signals that the field should be encoded as a Unix time (see
-// time.Unix())
-//
-// Slice and Array values default to encoding as multiple URL values of the
-// same name. Including the "comma" option signals that the field should be
-// encoded as a single comma-delimited value. Including the "space" option
-// similarly encodes the value as a single space-delimited string. Including
-// the "semicolon" option will encode the value as a semicolon-delimited string.
-// Including the "brackets" option signals that the multiple URL values should
-// have "[]" appended to the value name. "numbered" will append a number to
-// the end of each incidence of the value name, example:
-// name0=value0&name1=value1, etc.
-//
-// Anonymous struct fields are usually encoded as if their inner exported
-// fields were fields in the outer struct, subject to the standard Go
-// visibility rules. An anonymous struct field with a name given in its URL
-// tag is treated as having that name, rather than being anonymous.
-//
-// Non-nil pointer values are encoded as the value pointed to.
-//
-// Nested structs are encoded including parent fields in value names for
-// scoping. e.g:
-//
-// "user[name]=acme&user[addr][postcode]=1234&user[addr][city]=SFO"
-//
-// All other values are encoded using their default string representation.
-//
-// Multiple fields that encode to the same URL parameter name will be included
-// as multiple URL values of the same name.
-func Values(v interface{}) (url.Values, error) {
- values := make(url.Values)
- val := reflect.ValueOf(v)
- for val.Kind() == reflect.Ptr {
- if val.IsNil() {
- return values, nil
- }
- val = val.Elem()
- }
-
- if v == nil {
- return values, nil
- }
-
- if val.Kind() != reflect.Struct {
- return nil, fmt.Errorf("query: Values() expects struct input. Got %v", val.Kind())
- }
-
- err := reflectValue(values, val, "")
- return values, err
-}
-
-// reflectValue populates the values parameter from the struct fields in val.
-// Embedded structs are followed recursively (using the rules defined in the
-// Values function documentation) breadth-first.
-func reflectValue(values url.Values, val reflect.Value, scope string) error {
- var embedded []reflect.Value
-
- typ := val.Type()
- for i := 0; i < typ.NumField(); i++ {
- sf := typ.Field(i)
- if sf.PkgPath != "" && !sf.Anonymous { // unexported
- continue
- }
-
- sv := val.Field(i)
- tag := sf.Tag.Get("url")
- if tag == "-" {
- continue
- }
- name, opts := parseTag(tag)
- if name == "" {
- if sf.Anonymous && sv.Kind() == reflect.Struct {
- // save embedded struct for later processing
- embedded = append(embedded, sv)
- continue
- }
-
- name = sf.Name
- }
-
- if scope != "" {
- name = scope + "[" + name + "]"
- }
-
- if opts.Contains("omitempty") && isEmptyValue(sv) {
- continue
- }
-
- if sv.Type().Implements(encoderType) {
- if !reflect.Indirect(sv).IsValid() {
- sv = reflect.New(sv.Type().Elem())
- }
-
- m := sv.Interface().(Encoder)
- if err := m.EncodeValues(name, &values); err != nil {
- return err
- }
- continue
- }
-
- if sv.Kind() == reflect.Slice || sv.Kind() == reflect.Array {
- var del byte
- if opts.Contains("comma") {
- del = ','
- } else if opts.Contains("space") {
- del = ' '
- } else if opts.Contains("semicolon") {
- del = ';'
- } else if opts.Contains("brackets") {
- name = name + "[]"
- }
-
- if del != 0 {
- s := new(bytes.Buffer)
- first := true
- for i := 0; i < sv.Len(); i++ {
- if first {
- first = false
- } else {
- s.WriteByte(del)
- }
- s.WriteString(valueString(sv.Index(i), opts))
- }
- values.Add(name, s.String())
- } else {
- for i := 0; i < sv.Len(); i++ {
- k := name
- if opts.Contains("numbered") {
- k = fmt.Sprintf("%s%d", name, i)
- }
- values.Add(k, valueString(sv.Index(i), opts))
- }
- }
- continue
- }
-
- if sv.Type() == timeType {
- values.Add(name, valueString(sv, opts))
- continue
- }
-
- for sv.Kind() == reflect.Ptr {
- if sv.IsNil() {
- break
- }
- sv = sv.Elem()
- }
-
- if sv.Kind() == reflect.Struct {
- reflectValue(values, sv, name)
- continue
- }
-
- values.Add(name, valueString(sv, opts))
- }
-
- for _, f := range embedded {
- if err := reflectValue(values, f, scope); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// valueString returns the string representation of a value.
-func valueString(v reflect.Value, opts tagOptions) string {
- for v.Kind() == reflect.Ptr {
- if v.IsNil() {
- return ""
- }
- v = v.Elem()
- }
-
- if v.Kind() == reflect.Bool && opts.Contains("int") {
- if v.Bool() {
- return "1"
- }
- return "0"
- }
-
- if v.Type() == timeType {
- t := v.Interface().(time.Time)
- if opts.Contains("unix") {
- return strconv.FormatInt(t.Unix(), 10)
- }
- return t.Format(time.RFC3339)
- }
-
- return fmt.Sprint(v.Interface())
-}
-
-// isEmptyValue checks if a value should be considered empty for the purposes
-// of omitting fields with the "omitempty" option.
-func isEmptyValue(v reflect.Value) bool {
- switch v.Kind() {
- case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
- return v.Len() == 0
- case reflect.Bool:
- return !v.Bool()
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return v.Int() == 0
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- return v.Uint() == 0
- case reflect.Float32, reflect.Float64:
- return v.Float() == 0
- case reflect.Interface, reflect.Ptr:
- return v.IsNil()
- }
-
- if v.Type() == timeType {
- return v.Interface().(time.Time).IsZero()
- }
-
- return false
-}
-
-// tagOptions is the string following a comma in a struct field's "url" tag, or
-// the empty string. It does not include the leading comma.
-type tagOptions []string
-
-// parseTag splits a struct field's url tag into its name and comma-separated
-// options.
-func parseTag(tag string) (string, tagOptions) {
- s := strings.Split(tag, ",")
- return s[0], s[1:]
-}
-
-// Contains checks whether the tagOptions contains the specified option.
-func (o tagOptions) Contains(option string) bool {
- for _, s := range o {
- if s == option {
- return true
- }
- }
- return false
-}
diff --git a/vendor/src/github.com/google/go-querystring/query/encode_test.go b/vendor/src/github.com/google/go-querystring/query/encode_test.go
deleted file mode 100644
index da4c074..0000000
--- a/vendor/src/github.com/google/go-querystring/query/encode_test.go
+++ /dev/null
@@ -1,321 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package query
-
-import (
- "fmt"
- "net/url"
- "reflect"
- "testing"
- "time"
-)
-
-type Nested struct {
- A SubNested `url:"a"`
- B *SubNested `url:"b"`
- Ptr *SubNested `url:"ptr,omitempty"`
-}
-
-type SubNested struct {
- Value string `url:"value"`
-}
-
-func TestValues_types(t *testing.T) {
- str := "string"
- strPtr := &str
-
- tests := []struct {
- in interface{}
- want url.Values
- }{
- {
- // basic primitives
- struct {
- A string
- B int
- C uint
- D float32
- E bool
- }{},
- url.Values{
- "A": {""},
- "B": {"0"},
- "C": {"0"},
- "D": {"0"},
- "E": {"false"},
- },
- },
- {
- // pointers
- struct {
- A *string
- B *int
- C **string
- }{A: strPtr, C: &strPtr},
- url.Values{
- "A": {str},
- "B": {""},
- "C": {str},
- },
- },
- {
- // slices and arrays
- struct {
- A []string
- B []string `url:",comma"`
- C []string `url:",space"`
- D [2]string
- E [2]string `url:",comma"`
- F [2]string `url:",space"`
- G []*string `url:",space"`
- H []bool `url:",int,space"`
- I []string `url:",brackets"`
- J []string `url:",semicolon"`
- K []string `url:",numbered"`
- }{
- A: []string{"a", "b"},
- B: []string{"a", "b"},
- C: []string{"a", "b"},
- D: [2]string{"a", "b"},
- E: [2]string{"a", "b"},
- F: [2]string{"a", "b"},
- G: []*string{&str, &str},
- H: []bool{true, false},
- I: []string{"a", "b"},
- J: []string{"a", "b"},
- K: []string{"a", "b"},
- },
- url.Values{
- "A": {"a", "b"},
- "B": {"a,b"},
- "C": {"a b"},
- "D": {"a", "b"},
- "E": {"a,b"},
- "F": {"a b"},
- "G": {"string string"},
- "H": {"1 0"},
- "I[]": {"a", "b"},
- "J": {"a;b"},
- "K0": {"a"},
- "K1": {"b"},
- },
- },
- {
- // other types
- struct {
- A time.Time
- B time.Time `url:",unix"`
- C bool `url:",int"`
- D bool `url:",int"`
- }{
- A: time.Date(2000, 1, 1, 12, 34, 56, 0, time.UTC),
- B: time.Date(2000, 1, 1, 12, 34, 56, 0, time.UTC),
- C: true,
- D: false,
- },
- url.Values{
- "A": {"2000-01-01T12:34:56Z"},
- "B": {"946730096"},
- "C": {"1"},
- "D": {"0"},
- },
- },
- {
- struct {
- Nest Nested `url:"nest"`
- }{
- Nested{
- A: SubNested{
- Value: "that",
- },
- },
- },
- url.Values{
- "nest[a][value]": {"that"},
- "nest[b]": {""},
- },
- },
- {
- struct {
- Nest Nested `url:"nest"`
- }{
- Nested{
- Ptr: &SubNested{
- Value: "that",
- },
- },
- },
- url.Values{
- "nest[a][value]": {""},
- "nest[b]": {""},
- "nest[ptr][value]": {"that"},
- },
- },
- {
- nil,
- url.Values{},
- },
- }
-
- for i, tt := range tests {
- v, err := Values(tt.in)
- if err != nil {
- t.Errorf("%d. Values(%q) returned error: %v", i, tt.in, err)
- }
-
- if !reflect.DeepEqual(tt.want, v) {
- t.Errorf("%d. Values(%q) returned %v, want %v", i, tt.in, v, tt.want)
- }
- }
-}
-
-func TestValues_omitEmpty(t *testing.T) {
- str := ""
- s := struct {
- a string
- A string
- B string `url:",omitempty"`
- C string `url:"-"`
- D string `url:"omitempty"` // actually named omitempty, not an option
- E *string `url:",omitempty"`
- }{E: &str}
-
- v, err := Values(s)
- if err != nil {
- t.Errorf("Values(%q) returned error: %v", s, err)
- }
-
- want := url.Values{
- "A": {""},
- "omitempty": {""},
- "E": {""}, // E is included because the pointer is not empty, even though the string being pointed to is
- }
- if !reflect.DeepEqual(want, v) {
- t.Errorf("Values(%q) returned %v, want %v", s, v, want)
- }
-}
-
-type A struct {
- B
-}
-
-type B struct {
- C string
-}
-
-type D struct {
- B
- C string
-}
-
-type e struct {
- B
- C string
-}
-
-type F struct {
- e
-}
-
-func TestValues_embeddedStructs(t *testing.T) {
- tests := []struct {
- in interface{}
- want url.Values
- }{
- {
- A{B{C: "foo"}},
- url.Values{"C": {"foo"}},
- },
- {
- D{B: B{C: "bar"}, C: "foo"},
- url.Values{"C": {"foo", "bar"}},
- },
- {
- F{e{B: B{C: "bar"}, C: "foo"}}, // With unexported embed
- url.Values{"C": {"foo", "bar"}},
- },
- }
-
- for i, tt := range tests {
- v, err := Values(tt.in)
- if err != nil {
- t.Errorf("%d. Values(%q) returned error: %v", i, tt.in, err)
- }
-
- if !reflect.DeepEqual(tt.want, v) {
- t.Errorf("%d. Values(%q) returned %v, want %v", i, tt.in, v, tt.want)
- }
- }
-}
-
-func TestValues_invalidInput(t *testing.T) {
- _, err := Values("")
- if err == nil {
- t.Errorf("expected Values() to return an error on invalid input")
- }
-}
-
-type EncodedArgs []string
-
-func (m EncodedArgs) EncodeValues(key string, v *url.Values) error {
- for i, arg := range m {
- v.Set(fmt.Sprintf("%s.%d", key, i), arg)
- }
- return nil
-}
-
-func TestValues_Marshaler(t *testing.T) {
- s := struct {
- Args EncodedArgs `url:"arg"`
- }{[]string{"a", "b", "c"}}
- v, err := Values(s)
- if err != nil {
- t.Errorf("Values(%q) returned error: %v", s, err)
- }
-
- want := url.Values{
- "arg.0": {"a"},
- "arg.1": {"b"},
- "arg.2": {"c"},
- }
- if !reflect.DeepEqual(want, v) {
- t.Errorf("Values(%q) returned %v, want %v", s, v, want)
- }
-}
-
-func TestValues_MarshalerWithNilPointer(t *testing.T) {
- s := struct {
- Args *EncodedArgs `url:"arg"`
- }{}
- v, err := Values(s)
- if err != nil {
- t.Errorf("Values(%q) returned error: %v", s, err)
- }
-
- want := url.Values{}
- if !reflect.DeepEqual(want, v) {
- t.Errorf("Values(%q) returned %v, want %v", s, v, want)
- }
-}
-
-func TestTagParsing(t *testing.T) {
- name, opts := parseTag("field,foobar,foo")
- if name != "field" {
- t.Fatalf("name = %q, want field", name)
- }
- for _, tt := range []struct {
- opt string
- want bool
- }{
- {"foobar", true},
- {"foo", true},
- {"bar", false},
- {"field", false},
- } {
- if opts.Contains(tt.opt) != tt.want {
- t.Errorf("Contains(%q) = %v", tt.opt, !tt.want)
- }
- }
-}
diff --git a/vendor/src/github.com/gregjones/httpcache/LICENSE.txt b/vendor/src/github.com/gregjones/httpcache/LICENSE.txt
deleted file mode 100644
index 81316be..0000000
--- a/vendor/src/github.com/gregjones/httpcache/LICENSE.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Copyright © 2012 Greg Jones (greg.jones@gmail.com)
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Softwareâ€), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED “AS ISâ€, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\ No newline at end of file
diff --git a/vendor/src/github.com/gregjones/httpcache/README.md b/vendor/src/github.com/gregjones/httpcache/README.md
deleted file mode 100644
index ccd0172..0000000
--- a/vendor/src/github.com/gregjones/httpcache/README.md
+++ /dev/null
@@ -1,25 +0,0 @@
-httpcache
-=========
-
-[![Build Status](https://travis-ci.org/gregjones/httpcache.svg?branch=master)](https://travis-ci.org/gregjones/httpcache)
-
-A Transport for Go's http.Client that will cache responses according to the HTTP RFC
-
-Package httpcache provides a http.RoundTripper implementation that works as a mostly RFC-compliant cache for http responses.
-
-It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client and not for a shared proxy).
-
-**Documentation:** http://godoc.org/github.com/gregjones/httpcache
-
-**License:** MIT (see LICENSE.txt)
-
-Cache backends
---------------
-
-- The built-in 'memory' cache stores responses in an in-memory map.
-- [`github.com/gregjones/httpcache/diskcache`](https://github.com/gregjones/httpcache/tree/master/diskcache) provides a filesystem-backed cache using the [diskv](https://github.com/peterbourgon/diskv) library.
-- [`github.com/gregjones/httpcache/memcache`](https://github.com/gregjones/httpcache/tree/master/memcache) provides memcache implementations, for both App Engine and 'normal' memcache servers.
-- [`sourcegraph.com/sourcegraph/s3cache`](https://sourcegraph.com/github.com/sourcegraph/s3cache) uses Amazon S3 for storage.
-- [`github.com/gregjones/httpcache/leveldbcache`](https://github.com/gregjones/httpcache/tree/master/leveldbcache) provides a filesystem-backed cache using [leveldb](https://github.com/syndtr/goleveldb/leveldb).
-- [`github.com/die-net/lrucache`](https://github.com/die-net/lrucache) provides an in-memory cache that will evict least-recently used entries.
-- [`github.com/die-net/lrucache/twotier`](https://github.com/die-net/lrucache/tree/master/twotier) allows caches to be combined, for example to use lrucache above with a persistent disk-cache.
diff --git a/vendor/src/github.com/gregjones/httpcache/diskcache/diskcache.go b/vendor/src/github.com/gregjones/httpcache/diskcache/diskcache.go
deleted file mode 100644
index 42e3129..0000000
--- a/vendor/src/github.com/gregjones/httpcache/diskcache/diskcache.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Package diskcache provides an implementation of httpcache.Cache that uses the diskv package
-// to supplement an in-memory map with persistent storage
-//
-package diskcache
-
-import (
- "bytes"
- "crypto/md5"
- "encoding/hex"
- "github.com/peterbourgon/diskv"
- "io"
-)
-
-// Cache is an implementation of httpcache.Cache that supplements the in-memory map with persistent storage
-type Cache struct {
- d *diskv.Diskv
-}
-
-// Get returns the response corresponding to key if present
-func (c *Cache) Get(key string) (resp []byte, ok bool) {
- key = keyToFilename(key)
- resp, err := c.d.Read(key)
- if err != nil {
- return []byte{}, false
- }
- return resp, true
-}
-
-// Set saves a response to the cache as key
-func (c *Cache) Set(key string, resp []byte) {
- key = keyToFilename(key)
- c.d.WriteStream(key, bytes.NewReader(resp), true)
-}
-
-// Delete removes the response with key from the cache
-func (c *Cache) Delete(key string) {
- key = keyToFilename(key)
- c.d.Erase(key)
-}
-
-func keyToFilename(key string) string {
- h := md5.New()
- io.WriteString(h, key)
- return hex.EncodeToString(h.Sum(nil))
-}
-
-// New returns a new Cache that will store files in basePath
-func New(basePath string) *Cache {
- return &Cache{
- d: diskv.New(diskv.Options{
- BasePath: basePath,
- CacheSizeMax: 100 * 1024 * 1024, // 100MB
- }),
- }
-}
-
-// NewWithDiskv returns a new Cache using the provided Diskv as underlying
-// storage.
-func NewWithDiskv(d *diskv.Diskv) *Cache {
- return &Cache{d}
-}
diff --git a/vendor/src/github.com/gregjones/httpcache/diskcache/diskcache_test.go b/vendor/src/github.com/gregjones/httpcache/diskcache/diskcache_test.go
deleted file mode 100644
index 35c76cb..0000000
--- a/vendor/src/github.com/gregjones/httpcache/diskcache/diskcache_test.go
+++ /dev/null
@@ -1,42 +0,0 @@
-package diskcache
-
-import (
- "bytes"
- "io/ioutil"
- "os"
- "testing"
-)
-
-func TestDiskCache(t *testing.T) {
- tempDir, err := ioutil.TempDir("", "httpcache")
- if err != nil {
- t.Fatalf("TempDir: %v", err)
- }
- defer os.RemoveAll(tempDir)
-
- cache := New(tempDir)
-
- key := "testKey"
- _, ok := cache.Get(key)
- if ok {
- t.Fatal("retrieved key before adding it")
- }
-
- val := []byte("some bytes")
- cache.Set(key, val)
-
- retVal, ok := cache.Get(key)
- if !ok {
- t.Fatal("could not retrieve an element we just added")
- }
- if !bytes.Equal(retVal, val) {
- t.Fatal("retrieved a different value than what we put in")
- }
-
- cache.Delete(key)
-
- _, ok = cache.Get(key)
- if ok {
- t.Fatal("deleted key still present")
- }
-}
diff --git a/vendor/src/github.com/gregjones/httpcache/httpcache.go b/vendor/src/github.com/gregjones/httpcache/httpcache.go
deleted file mode 100644
index ee6bf5c..0000000
--- a/vendor/src/github.com/gregjones/httpcache/httpcache.go
+++ /dev/null
@@ -1,594 +0,0 @@
-// Package httpcache provides a http.RoundTripper implementation that works as a
-// mostly RFC-compliant cache for http responses.
-//
-// It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client
-// and not for a shared proxy).
-//
-package httpcache
-
-import (
- "bufio"
- "bytes"
- "errors"
- "fmt"
- "io"
- "log"
- "net/http"
- "net/http/httputil"
- "strings"
- "sync"
- "time"
-)
-
-const (
- stale = iota
- fresh
- transparent
- // XFromCache is the header added to responses that are returned from the cache
- XFromCache = "X-From-Cache"
-)
-
-// A Cache interface is used by the Transport to store and retrieve responses.
-type Cache interface {
- // Get returns the []byte representation of a cached response and a bool
- // set to true if the value isn't empty
- Get(key string) (responseBytes []byte, ok bool)
- // Set stores the []byte representation of a response against a key
- Set(key string, responseBytes []byte)
- // Delete removes the value associated with the key
- Delete(key string)
-}
-
-// cacheKey returns the cache key for req.
-func cacheKey(req *http.Request) string {
- return req.URL.String()
-}
-
-// CachedResponse returns the cached http.Response for req if present, and nil
-// otherwise.
-func CachedResponse(c Cache, req *http.Request) (resp *http.Response, err error) {
- cachedVal, ok := c.Get(cacheKey(req))
- if !ok {
- return
- }
-
- b := bytes.NewBuffer(cachedVal)
- return http.ReadResponse(bufio.NewReader(b), req)
-}
-
-// MemoryCache is an implemtation of Cache that stores responses in an in-memory map.
-type MemoryCache struct {
- mu sync.RWMutex
- items map[string][]byte
-}
-
-// Get returns the []byte representation of the response and true if present, false if not
-func (c *MemoryCache) Get(key string) (resp []byte, ok bool) {
- c.mu.RLock()
- resp, ok = c.items[key]
- c.mu.RUnlock()
- return resp, ok
-}
-
-// Set saves response resp to the cache with key
-func (c *MemoryCache) Set(key string, resp []byte) {
- c.mu.Lock()
- c.items[key] = resp
- c.mu.Unlock()
-}
-
-// Delete removes key from the cache
-func (c *MemoryCache) Delete(key string) {
- c.mu.Lock()
- delete(c.items, key)
- c.mu.Unlock()
-}
-
-// NewMemoryCache returns a new Cache that will store items in an in-memory map
-func NewMemoryCache() *MemoryCache {
- c := &MemoryCache{items: map[string][]byte{}}
- return c
-}
-
-// onEOFReader executes a function on reader EOF or close
-type onEOFReader struct {
- rc io.ReadCloser
- fn func()
-}
-
-func (r *onEOFReader) Read(p []byte) (n int, err error) {
- n, err = r.rc.Read(p)
- if err == io.EOF {
- r.runFunc()
- }
- return
-}
-
-func (r *onEOFReader) Close() error {
- err := r.rc.Close()
- r.runFunc()
- return err
-}
-
-func (r *onEOFReader) runFunc() {
- if fn := r.fn; fn != nil {
- fn()
- r.fn = nil
- }
-}
-
-// Transport is an implementation of http.RoundTripper that will return values from a cache
-// where possible (avoiding a network request) and will additionally add validators (etag/if-modified-since)
-// to repeated requests allowing servers to return 304 / Not Modified
-type Transport struct {
- // The RoundTripper interface actually used to make requests
- // If nil, http.DefaultTransport is used
- Transport http.RoundTripper
- Cache Cache
- // If true, responses returned from the cache will be given an extra header, X-From-Cache
- MarkCachedResponses bool
- // guards modReq
- mu sync.RWMutex
- // Mapping of original request => cloned
- modReq map[*http.Request]*http.Request
-}
-
-// NewTransport returns a new Transport with the
-// provided Cache implementation and MarkCachedResponses set to true
-func NewTransport(c Cache) *Transport {
- return &Transport{Cache: c, MarkCachedResponses: true}
-}
-
-// Client returns an *http.Client that caches responses.
-func (t *Transport) Client() *http.Client {
- return &http.Client{Transport: t}
-}
-
-// varyMatches will return false unless all of the cached values for the headers listed in Vary
-// match the new request
-func varyMatches(cachedResp *http.Response, req *http.Request) bool {
- for _, header := range headerAllCommaSepValues(cachedResp.Header, "vary") {
- header = http.CanonicalHeaderKey(header)
- if header != "" && req.Header.Get(header) != cachedResp.Header.Get("X-Varied-"+header) {
- return false
- }
- }
- return true
-}
-
-// setModReq maintains a mapping between original requests and their associated cloned requests
-func (t *Transport) setModReq(orig, mod *http.Request) {
- t.mu.Lock()
- if t.modReq == nil {
- t.modReq = make(map[*http.Request]*http.Request)
- }
- if mod == nil {
- delete(t.modReq, orig)
- } else {
- t.modReq[orig] = mod
- }
- t.mu.Unlock()
-}
-
-// RoundTrip takes a Request and returns a Response
-//
-// If there is a fresh Response already in cache, then it will be returned without connecting to
-// the server.
-//
-// If there is a stale Response, then any validators it contains will be set on the new request
-// to give the server a chance to respond with NotModified. If this happens, then the cached Response
-// will be returned.
-func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error) {
- cacheKey := cacheKey(req)
- cacheable := (req.Method == "GET" || req.Method == "HEAD") && req.Header.Get("range") == ""
- var cachedResp *http.Response
- if cacheable {
- cachedResp, err = CachedResponse(t.Cache, req)
- } else {
- // Need to invalidate an existing value
- t.Cache.Delete(cacheKey)
- }
-
- transport := t.Transport
- if transport == nil {
- transport = http.DefaultTransport
- }
-
- if cacheable && cachedResp != nil && err == nil {
- if t.MarkCachedResponses {
- cachedResp.Header.Set(XFromCache, "1")
- }
-
- if varyMatches(cachedResp, req) {
- // Can only use cached value if the new request doesn't Vary significantly
- freshness := getFreshness(cachedResp.Header, req.Header)
- if freshness == fresh {
- return cachedResp, nil
- }
-
- if freshness == stale {
- var req2 *http.Request
- // Add validators if caller hasn't already done so
- etag := cachedResp.Header.Get("etag")
- if etag != "" && req.Header.Get("etag") == "" {
- req2 = cloneRequest(req)
- req2.Header.Set("if-none-match", etag)
- }
- lastModified := cachedResp.Header.Get("last-modified")
- if lastModified != "" && req.Header.Get("last-modified") == "" {
- if req2 == nil {
- req2 = cloneRequest(req)
- }
- req2.Header.Set("if-modified-since", lastModified)
- }
- if req2 != nil {
- // Associate original request with cloned request so we can refer to
- // it in CancelRequest(). Release the mapping when it's no longer needed.
- t.setModReq(req, req2)
- defer func(originalReq *http.Request) {
- // Release req/clone mapping on error
- if err != nil {
- t.setModReq(originalReq, nil)
- }
- if resp != nil {
- // Release req/clone mapping on body close/EOF
- resp.Body = &onEOFReader{
- rc: resp.Body,
- fn: func() { t.setModReq(originalReq, nil) },
- }
- }
- }(req)
- req = req2
- }
- }
- }
-
- resp, err = transport.RoundTrip(req)
- if err == nil && req.Method == "GET" && resp.StatusCode == http.StatusNotModified {
- // Replace the 304 response with the one from cache, but update with some new headers
- endToEndHeaders := getEndToEndHeaders(resp.Header)
- for _, header := range endToEndHeaders {
- cachedResp.Header[header] = resp.Header[header]
- }
- cachedResp.Status = fmt.Sprintf("%d %s", http.StatusOK, http.StatusText(http.StatusOK))
- cachedResp.StatusCode = http.StatusOK
-
- resp = cachedResp
- } else if (err != nil || (cachedResp != nil && resp.StatusCode >= 500)) &&
- req.Method == "GET" && canStaleOnError(cachedResp.Header, req.Header) {
- // In case of transport failure and stale-if-error activated, returns cached content
- // when available
- cachedResp.Status = fmt.Sprintf("%d %s", http.StatusOK, http.StatusText(http.StatusOK))
- cachedResp.StatusCode = http.StatusOK
- return cachedResp, nil
- } else {
- if err != nil || resp.StatusCode != http.StatusOK {
- t.Cache.Delete(cacheKey)
- }
- if err != nil {
- return nil, err
- }
- }
- } else {
- reqCacheControl := parseCacheControl(req.Header)
- if _, ok := reqCacheControl["only-if-cached"]; ok {
- resp = newGatewayTimeoutResponse(req)
- } else {
- resp, err = transport.RoundTrip(req)
- if err != nil {
- return nil, err
- }
- }
- }
-
- if cacheable && canStore(parseCacheControl(req.Header), parseCacheControl(resp.Header)) {
- for _, varyKey := range headerAllCommaSepValues(resp.Header, "vary") {
- varyKey = http.CanonicalHeaderKey(varyKey)
- fakeHeader := "X-Varied-" + varyKey
- reqValue := req.Header.Get(varyKey)
- if reqValue != "" {
- resp.Header.Set(fakeHeader, reqValue)
- }
- }
- respBytes, err := httputil.DumpResponse(resp, true)
- if err == nil {
- t.Cache.Set(cacheKey, respBytes)
- }
- } else {
- t.Cache.Delete(cacheKey)
- }
- return resp, nil
-}
-
-// CancelRequest calls CancelRequest on the underlaying transport if implemented or
-// throw a warning otherwise.
-func (t *Transport) CancelRequest(req *http.Request) {
- type canceler interface {
- CancelRequest(*http.Request)
- }
- tr, ok := t.Transport.(canceler)
- if !ok {
- log.Printf("httpcache: Client Transport of type %T doesn't support CancelRequest; Timeout not supported", t.Transport)
- return
- }
-
- t.mu.RLock()
- if modReq, ok := t.modReq[req]; ok {
- t.mu.RUnlock()
- t.mu.Lock()
- delete(t.modReq, req)
- t.mu.Unlock()
- tr.CancelRequest(modReq)
- } else {
- t.mu.RUnlock()
- tr.CancelRequest(req)
- }
-}
-
-// ErrNoDateHeader indicates that the HTTP headers contained no Date header.
-var ErrNoDateHeader = errors.New("no Date header")
-
-// Date parses and returns the value of the Date header.
-func Date(respHeaders http.Header) (date time.Time, err error) {
- dateHeader := respHeaders.Get("date")
- if dateHeader == "" {
- err = ErrNoDateHeader
- return
- }
-
- return time.Parse(time.RFC1123, dateHeader)
-}
-
-type realClock struct{}
-
-func (c *realClock) since(d time.Time) time.Duration {
- return time.Since(d)
-}
-
-type timer interface {
- since(d time.Time) time.Duration
-}
-
-var clock timer = &realClock{}
-
-// getFreshness will return one of fresh/stale/transparent based on the cache-control
-// values of the request and the response
-//
-// fresh indicates the response can be returned
-// stale indicates that the response needs validating before it is returned
-// transparent indicates the response should not be used to fulfil the request
-//
-// Because this is only a private cache, 'public' and 'private' in cache-control aren't
-// signficant. Similarly, smax-age isn't used.
-func getFreshness(respHeaders, reqHeaders http.Header) (freshness int) {
- respCacheControl := parseCacheControl(respHeaders)
- reqCacheControl := parseCacheControl(reqHeaders)
- if _, ok := reqCacheControl["no-cache"]; ok {
- return transparent
- }
- if _, ok := respCacheControl["no-cache"]; ok {
- return stale
- }
- if _, ok := reqCacheControl["only-if-cached"]; ok {
- return fresh
- }
-
- date, err := Date(respHeaders)
- if err != nil {
- return stale
- }
- currentAge := clock.since(date)
-
- var lifetime time.Duration
- var zeroDuration time.Duration
-
- // If a response includes both an Expires header and a max-age directive,
- // the max-age directive overrides the Expires header, even if the Expires header is more restrictive.
- if maxAge, ok := respCacheControl["max-age"]; ok {
- lifetime, err = time.ParseDuration(maxAge + "s")
- if err != nil {
- lifetime = zeroDuration
- }
- } else {
- expiresHeader := respHeaders.Get("Expires")
- if expiresHeader != "" {
- expires, err := time.Parse(time.RFC1123, expiresHeader)
- if err != nil {
- lifetime = zeroDuration
- } else {
- lifetime = expires.Sub(date)
- }
- }
- }
-
- if maxAge, ok := reqCacheControl["max-age"]; ok {
- // the client is willing to accept a response whose age is no greater than the specified time in seconds
- lifetime, err = time.ParseDuration(maxAge + "s")
- if err != nil {
- lifetime = zeroDuration
- }
- }
- if minfresh, ok := reqCacheControl["min-fresh"]; ok {
- // the client wants a response that will still be fresh for at least the specified number of seconds.
- minfreshDuration, err := time.ParseDuration(minfresh + "s")
- if err == nil {
- currentAge = time.Duration(currentAge + minfreshDuration)
- }
- }
-
- if maxstale, ok := reqCacheControl["max-stale"]; ok {
- // Indicates that the client is willing to accept a response that has exceeded its expiration time.
- // If max-stale is assigned a value, then the client is willing to accept a response that has exceeded
- // its expiration time by no more than the specified number of seconds.
- // If no value is assigned to max-stale, then the client is willing to accept a stale response of any age.
- //
- // Responses served only because of a max-stale value are supposed to have a Warning header added to them,
- // but that seems like a hassle, and is it actually useful? If so, then there needs to be a different
- // return-value available here.
- if maxstale == "" {
- return fresh
- }
- maxstaleDuration, err := time.ParseDuration(maxstale + "s")
- if err == nil {
- currentAge = time.Duration(currentAge - maxstaleDuration)
- }
- }
-
- if lifetime > currentAge {
- return fresh
- }
-
- return stale
-}
-
-// Returns true if either the request or the response includes the stale-if-error
-// cache control extension: https://tools.ietf.org/html/rfc5861
-func canStaleOnError(respHeaders, reqHeaders http.Header) bool {
- respCacheControl := parseCacheControl(respHeaders)
- reqCacheControl := parseCacheControl(reqHeaders)
-
- var err error
- lifetime := time.Duration(-1)
-
- if staleMaxAge, ok := respCacheControl["stale-if-error"]; ok {
- if staleMaxAge != "" {
- lifetime, err = time.ParseDuration(staleMaxAge + "s")
- if err != nil {
- return false
- }
- } else {
- return true
- }
- }
- if staleMaxAge, ok := reqCacheControl["stale-if-error"]; ok {
- if staleMaxAge != "" {
- lifetime, err = time.ParseDuration(staleMaxAge + "s")
- if err != nil {
- return false
- }
- } else {
- return true
- }
- }
-
- if lifetime >= 0 {
- date, err := Date(respHeaders)
- if err != nil {
- return false
- }
- currentAge := clock.since(date)
- if lifetime > currentAge {
- return true
- }
- }
-
- return false
-}
-
-func getEndToEndHeaders(respHeaders http.Header) []string {
- // These headers are always hop-by-hop
- hopByHopHeaders := map[string]struct{}{
- "Connection": struct{}{},
- "Keep-Alive": struct{}{},
- "Proxy-Authenticate": struct{}{},
- "Proxy-Authorization": struct{}{},
- "Te": struct{}{},
- "Trailers": struct{}{},
- "Transfer-Encoding": struct{}{},
- "Upgrade": struct{}{},
- }
-
- for _, extra := range strings.Split(respHeaders.Get("connection"), ",") {
- // any header listed in connection, if present, is also considered hop-by-hop
- if strings.Trim(extra, " ") != "" {
- hopByHopHeaders[http.CanonicalHeaderKey(extra)] = struct{}{}
- }
- }
- endToEndHeaders := []string{}
- for respHeader, _ := range respHeaders {
- if _, ok := hopByHopHeaders[respHeader]; !ok {
- endToEndHeaders = append(endToEndHeaders, respHeader)
- }
- }
- return endToEndHeaders
-}
-
-func canStore(reqCacheControl, respCacheControl cacheControl) (canStore bool) {
- if _, ok := respCacheControl["no-store"]; ok {
- return false
- }
- if _, ok := reqCacheControl["no-store"]; ok {
- return false
- }
- return true
-}
-
-func newGatewayTimeoutResponse(req *http.Request) *http.Response {
- var braw bytes.Buffer
- braw.WriteString("HTTP/1.1 504 Gateway Timeout\r\n\r\n")
- resp, err := http.ReadResponse(bufio.NewReader(&braw), req)
- if err != nil {
- panic(err)
- }
- return resp
-}
-
-// cloneRequest returns a clone of the provided *http.Request.
-// The clone is a shallow copy of the struct and its Header map.
-// (This function copyright goauth2 authors: https://code.google.com/p/goauth2)
-func cloneRequest(r *http.Request) *http.Request {
- // shallow copy of the struct
- r2 := new(http.Request)
- *r2 = *r
- // deep copy of the Header
- r2.Header = make(http.Header)
- for k, s := range r.Header {
- r2.Header[k] = s
- }
- return r2
-}
-
-type cacheControl map[string]string
-
-func parseCacheControl(headers http.Header) cacheControl {
- cc := cacheControl{}
- ccHeader := headers.Get("Cache-Control")
- for _, part := range strings.Split(ccHeader, ",") {
- part = strings.Trim(part, " ")
- if part == "" {
- continue
- }
- if strings.ContainsRune(part, '=') {
- keyval := strings.Split(part, "=")
- cc[strings.Trim(keyval[0], " ")] = strings.Trim(keyval[1], ",")
- } else {
- cc[part] = ""
- }
- }
- return cc
-}
-
-// headerAllCommaSepValues returns all comma-separated values (each
-// with whitespace trimmed) for header name in headers. According to
-// Section 4.2 of the HTTP/1.1 spec
-// (http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2),
-// values from multiple occurrences of a header should be concatenated, if
-// the header's value is a comma-separated list.
-func headerAllCommaSepValues(headers http.Header, name string) []string {
- var vals []string
- for _, val := range headers[http.CanonicalHeaderKey(name)] {
- fields := strings.Split(val, ",")
- for i, f := range fields {
- fields[i] = strings.TrimSpace(f)
- }
- vals = append(vals, fields...)
- }
- return vals
-}
-
-// NewMemoryCacheTransport returns a new Transport using the in-memory cache implementation
-func NewMemoryCacheTransport() *Transport {
- c := NewMemoryCache()
- t := NewTransport(c)
- return t
-}
diff --git a/vendor/src/github.com/gregjones/httpcache/httpcache_test.go b/vendor/src/github.com/gregjones/httpcache/httpcache_test.go
deleted file mode 100644
index 0ef7bab..0000000
--- a/vendor/src/github.com/gregjones/httpcache/httpcache_test.go
+++ /dev/null
@@ -1,1213 +0,0 @@
-package httpcache
-
-import (
- "bytes"
- "errors"
- "flag"
- "io"
- "io/ioutil"
- "net/http"
- "net/http/httptest"
- "os"
- "strconv"
- "testing"
- "time"
-)
-
-var s struct {
- server *httptest.Server
- client http.Client
- transport *Transport
-}
-
-type fakeClock struct {
- elapsed time.Duration
-}
-
-func (c *fakeClock) since(t time.Time) time.Duration {
- return c.elapsed
-}
-
-func TestMain(m *testing.M) {
- flag.Parse()
- setup()
- code := m.Run()
- teardown()
- os.Exit(code)
-}
-
-func setup() {
- tp := NewMemoryCacheTransport()
- client := http.Client{Transport: tp}
- s.transport = tp
- s.client = client
-
- mux := http.NewServeMux()
- s.server = httptest.NewServer(mux)
-
- mux.HandleFunc("/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- w.Header().Set("Cache-Control", "max-age=3600")
- }))
-
- mux.HandleFunc("/method", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- w.Header().Set("Cache-Control", "max-age=3600")
- w.Write([]byte(r.Method))
- }))
-
- mux.HandleFunc("/range", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- lm := "Fri, 14 Dec 2010 01:01:50 GMT"
- if r.Header.Get("if-modified-since") == lm {
- w.WriteHeader(http.StatusNotModified)
- return
- }
- w.Header().Set("last-modified", lm)
- if r.Header.Get("range") == "bytes=4-9" {
- w.WriteHeader(http.StatusPartialContent)
- w.Write([]byte(" text "))
- return
- }
- w.Write([]byte("Some text content"))
- }))
-
- mux.HandleFunc("/nostore", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- w.Header().Set("Cache-Control", "no-store")
- }))
-
- mux.HandleFunc("/etag", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- etag := "124567"
- if r.Header.Get("if-none-match") == etag {
- w.WriteHeader(http.StatusNotModified)
- return
- }
- w.Header().Set("etag", etag)
- }))
-
- mux.HandleFunc("/lastmodified", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- lm := "Fri, 14 Dec 2010 01:01:50 GMT"
- if r.Header.Get("if-modified-since") == lm {
- w.WriteHeader(http.StatusNotModified)
- return
- }
- w.Header().Set("last-modified", lm)
- }))
-
- mux.HandleFunc("/varyaccept", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- w.Header().Set("Cache-Control", "max-age=3600")
- w.Header().Set("Content-Type", "text/plain")
- w.Header().Set("Vary", "Accept")
- w.Write([]byte("Some text content"))
- }))
-
- mux.HandleFunc("/doublevary", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- w.Header().Set("Cache-Control", "max-age=3600")
- w.Header().Set("Content-Type", "text/plain")
- w.Header().Set("Vary", "Accept, Accept-Language")
- w.Write([]byte("Some text content"))
- }))
- mux.HandleFunc("/2varyheaders", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- w.Header().Set("Cache-Control", "max-age=3600")
- w.Header().Set("Content-Type", "text/plain")
- w.Header().Add("Vary", "Accept")
- w.Header().Add("Vary", "Accept-Language")
- w.Write([]byte("Some text content"))
- }))
- mux.HandleFunc("/varyunused", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- w.Header().Set("Cache-Control", "max-age=3600")
- w.Header().Set("Content-Type", "text/plain")
- w.Header().Set("Vary", "X-Madeup-Header")
- w.Write([]byte("Some text content"))
- }))
-
- updateFieldsCounter := 0
- mux.HandleFunc("/updatefields", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- w.Header().Set("X-Counter", strconv.Itoa(updateFieldsCounter))
- w.Header().Set("Etag", `"e"`)
- updateFieldsCounter++
- if r.Header.Get("if-none-match") != "" {
- w.WriteHeader(http.StatusNotModified)
- return
- }
- w.Write([]byte("Some text content"))
- }))
-}
-
-func teardown() {
- s.server.Close()
-}
-
-func resetTest() {
- s.transport.Cache = NewMemoryCache()
- clock = &realClock{}
-}
-
-// TestCacheableMethod ensures that uncacheable method does not get stored
-// in cache and get incorrectly used for a following cacheable method request.
-func TestCacheableMethod(t *testing.T) {
- resetTest()
- {
- req, err := http.NewRequest("POST", s.server.URL+"/method", nil)
- if err != nil {
- t.Fatal(err)
- }
- resp, err := s.client.Do(req)
- if err != nil {
- t.Fatal(err)
- }
- var buf bytes.Buffer
- _, err = io.Copy(&buf, resp.Body)
- if err != nil {
- t.Fatal(err)
- }
- err = resp.Body.Close()
- if err != nil {
- t.Fatal(err)
- }
- if got, want := buf.String(), "POST"; got != want {
- t.Errorf("got %q, want %q", got, want)
- }
- if resp.StatusCode != http.StatusOK {
- t.Errorf("response status code isn't 200 OK: %v", resp.StatusCode)
- }
- }
- {
- req, err := http.NewRequest("GET", s.server.URL+"/method", nil)
- if err != nil {
- t.Fatal(err)
- }
- resp, err := s.client.Do(req)
- if err != nil {
- t.Fatal(err)
- }
- var buf bytes.Buffer
- _, err = io.Copy(&buf, resp.Body)
- if err != nil {
- t.Fatal(err)
- }
- err = resp.Body.Close()
- if err != nil {
- t.Fatal(err)
- }
- if got, want := buf.String(), "GET"; got != want {
- t.Errorf("got wrong body %q, want %q", got, want)
- }
- if resp.StatusCode != http.StatusOK {
- t.Errorf("response status code isn't 200 OK: %v", resp.StatusCode)
- }
- if resp.Header.Get(XFromCache) != "" {
- t.Errorf("XFromCache header isn't blank")
- }
- }
-}
-
-func TestDontStorePartialRangeInCache(t *testing.T) {
- resetTest()
- {
- req, err := http.NewRequest("GET", s.server.URL+"/range", nil)
- if err != nil {
- t.Fatal(err)
- }
- req.Header.Set("range", "bytes=4-9")
- resp, err := s.client.Do(req)
- if err != nil {
- t.Fatal(err)
- }
- var buf bytes.Buffer
- _, err = io.Copy(&buf, resp.Body)
- if err != nil {
- t.Fatal(err)
- }
- err = resp.Body.Close()
- if err != nil {
- t.Fatal(err)
- }
- if got, want := buf.String(), " text "; got != want {
- t.Errorf("got %q, want %q", got, want)
- }
- if resp.StatusCode != http.StatusPartialContent {
- t.Errorf("response status code isn't 206 Partial Content: %v", resp.StatusCode)
- }
- }
- {
- req, err := http.NewRequest("GET", s.server.URL+"/range", nil)
- if err != nil {
- t.Fatal(err)
- }
- resp, err := s.client.Do(req)
- if err != nil {
- t.Fatal(err)
- }
- var buf bytes.Buffer
- _, err = io.Copy(&buf, resp.Body)
- if err != nil {
- t.Fatal(err)
- }
- err = resp.Body.Close()
- if err != nil {
- t.Fatal(err)
- }
- if got, want := buf.String(), "Some text content"; got != want {
- t.Errorf("got %q, want %q", got, want)
- }
- if resp.StatusCode != http.StatusOK {
- t.Errorf("response status code isn't 200 OK: %v", resp.StatusCode)
- }
- if resp.Header.Get(XFromCache) != "" {
- t.Error("XFromCache header isn't blank")
- }
- }
- {
- req, err := http.NewRequest("GET", s.server.URL+"/range", nil)
- if err != nil {
- t.Fatal(err)
- }
- resp, err := s.client.Do(req)
- if err != nil {
- t.Fatal(err)
- }
- var buf bytes.Buffer
- _, err = io.Copy(&buf, resp.Body)
- if err != nil {
- t.Fatal(err)
- }
- err = resp.Body.Close()
- if err != nil {
- t.Fatal(err)
- }
- if got, want := buf.String(), "Some text content"; got != want {
- t.Errorf("got %q, want %q", got, want)
- }
- if resp.StatusCode != http.StatusOK {
- t.Errorf("response status code isn't 200 OK: %v", resp.StatusCode)
- }
- if resp.Header.Get(XFromCache) != "1" {
- t.Errorf(`XFromCache header isn't "1": %v`, resp.Header.Get(XFromCache))
- }
- }
- {
- req, err := http.NewRequest("GET", s.server.URL+"/range", nil)
- if err != nil {
- t.Fatal(err)
- }
- req.Header.Set("range", "bytes=4-9")
- resp, err := s.client.Do(req)
- if err != nil {
- t.Fatal(err)
- }
- var buf bytes.Buffer
- _, err = io.Copy(&buf, resp.Body)
- if err != nil {
- t.Fatal(err)
- }
- err = resp.Body.Close()
- if err != nil {
- t.Fatal(err)
- }
- if got, want := buf.String(), " text "; got != want {
- t.Errorf("got %q, want %q", got, want)
- }
- if resp.StatusCode != http.StatusPartialContent {
- t.Errorf("response status code isn't 206 Partial Content: %v", resp.StatusCode)
- }
- }
-}
-
-func TestGetOnlyIfCachedHit(t *testing.T) {
- resetTest()
- {
- req, err := http.NewRequest("GET", s.server.URL, nil)
- if err != nil {
- t.Fatal(err)
- }
- resp, err := s.client.Do(req)
- if err != nil {
- t.Fatal(err)
- }
- defer resp.Body.Close()
- if resp.Header.Get(XFromCache) != "" {
- t.Fatal("XFromCache header isn't blank")
- }
- }
- {
- req, err := http.NewRequest("GET", s.server.URL, nil)
- if err != nil {
- t.Fatal(err)
- }
- req.Header.Add("cache-control", "only-if-cached")
- resp, err := s.client.Do(req)
- if err != nil {
- t.Fatal(err)
- }
- defer resp.Body.Close()
- if resp.Header.Get(XFromCache) != "1" {
- t.Fatalf(`XFromCache header isn't "1": %v`, resp.Header.Get(XFromCache))
- }
- if resp.StatusCode != http.StatusOK {
- t.Fatalf("response status code isn't 200 OK: %v", resp.StatusCode)
- }
- }
-}
-
-func TestGetOnlyIfCachedMiss(t *testing.T) {
- resetTest()
- req, err := http.NewRequest("GET", s.server.URL, nil)
- if err != nil {
- t.Fatal(err)
- }
- req.Header.Add("cache-control", "only-if-cached")
- resp, err := s.client.Do(req)
- if err != nil {
- t.Fatal(err)
- }
- defer resp.Body.Close()
- if resp.Header.Get(XFromCache) != "" {
- t.Fatal("XFromCache header isn't blank")
- }
- if resp.StatusCode != http.StatusGatewayTimeout {
- t.Fatalf("response status code isn't 504 GatewayTimeout: %v", resp.StatusCode)
- }
-}
-
-func TestGetNoStoreRequest(t *testing.T) {
- resetTest()
- req, err := http.NewRequest("GET", s.server.URL, nil)
- if err != nil {
- t.Fatal(err)
- }
- req.Header.Add("Cache-Control", "no-store")
- {
- resp, err := s.client.Do(req)
- if err != nil {
- t.Fatal(err)
- }
- defer resp.Body.Close()
- if resp.Header.Get(XFromCache) != "" {
- t.Fatal("XFromCache header isn't blank")
- }
- }
- {
- resp, err := s.client.Do(req)
- if err != nil {
- t.Fatal(err)
- }
- defer resp.Body.Close()
- if resp.Header.Get(XFromCache) != "" {
- t.Fatal("XFromCache header isn't blank")
- }
- }
-}
-
-func TestGetNoStoreResponse(t *testing.T) {
- resetTest()
- req, err := http.NewRequest("GET", s.server.URL+"/nostore", nil)
- if err != nil {
- t.Fatal(err)
- }
- {
- resp, err := s.client.Do(req)
- if err != nil {
- t.Fatal(err)
- }
- defer resp.Body.Close()
- if resp.Header.Get(XFromCache) != "" {
- t.Fatal("XFromCache header isn't blank")
- }
- }
- {
- resp, err := s.client.Do(req)
- if err != nil {
- t.Fatal(err)
- }
- defer resp.Body.Close()
- if resp.Header.Get(XFromCache) != "" {
- t.Fatal("XFromCache header isn't blank")
- }
- }
-}
-
-func TestGetWithEtag(t *testing.T) {
- resetTest()
- req, err := http.NewRequest("GET", s.server.URL+"/etag", nil)
- if err != nil {
- t.Fatal(err)
- }
- {
- resp, err := s.client.Do(req)
- if err != nil {
- t.Fatal(err)
- }
- defer resp.Body.Close()
- if resp.Header.Get(XFromCache) != "" {
- t.Fatal("XFromCache header isn't blank")
- }
- }
- {
- resp, err := s.client.Do(req)
- if err != nil {
- t.Fatal(err)
- }
- defer resp.Body.Close()
- if resp.Header.Get(XFromCache) != "1" {
- t.Fatalf(`XFromCache header isn't "1": %v`, resp.Header.Get(XFromCache))
- }
- // additional assertions to verify that 304 response is converted properly
- if resp.StatusCode != http.StatusOK {
- t.Fatalf("response status code isn't 200 OK: %v", resp.StatusCode)
- }
- if _, ok := resp.Header["Connection"]; ok {
- t.Fatalf("Connection header isn't absent")
- }
- }
-}
-
-func TestGetWithLastModified(t *testing.T) {
- resetTest()
- req, err := http.NewRequest("GET", s.server.URL+"/lastmodified", nil)
- if err != nil {
- t.Fatal(err)
- }
- defer func() {
- if len(s.transport.modReq) != 0 {
- t.Errorf("Request-map is not empty")
- }
- }()
- {
- resp, err := s.client.Do(req)
- if err != nil {
- t.Fatal(err)
- }
- defer resp.Body.Close()
- if resp.Header.Get(XFromCache) != "" {
- t.Fatal("XFromCache header isn't blank")
- }
- }
- {
- resp, err := s.client.Do(req)
- if err != nil {
- t.Fatal(err)
- }
- defer resp.Body.Close()
- if resp.Header.Get(XFromCache) != "1" {
- t.Fatalf(`XFromCache header isn't "1": %v`, resp.Header.Get(XFromCache))
- }
- }
-}
-
-func TestGetWithVary(t *testing.T) {
- resetTest()
- req, err := http.NewRequest("GET", s.server.URL+"/varyaccept", nil)
- if err != nil {
- t.Fatal(err)
- }
- req.Header.Set("Accept", "text/plain")
- {
- resp, err := s.client.Do(req)
- if err != nil {
- t.Fatal(err)
- }
- defer resp.Body.Close()
- if resp.Header.Get("Vary") != "Accept" {
- t.Fatalf(`Vary header isn't "Accept": %v`, resp.Header.Get("Vary"))
- }
- }
- {
- resp, err := s.client.Do(req)
- if err != nil {
- t.Fatal(err)
- }
- defer resp.Body.Close()
- if resp.Header.Get(XFromCache) != "1" {
- t.Fatalf(`XFromCache header isn't "1": %v`, resp.Header.Get(XFromCache))
- }
- }
- req.Header.Set("Accept", "text/html")
- {
- resp, err := s.client.Do(req)
- if err != nil {
- t.Fatal(err)
- }
- defer resp.Body.Close()
- if resp.Header.Get(XFromCache) != "" {
- t.Fatal("XFromCache header isn't blank")
- }
- }
- req.Header.Set("Accept", "")
- {
- resp, err := s.client.Do(req)
- if err != nil {
- t.Fatal(err)
- }
- defer resp.Body.Close()
- if resp.Header.Get(XFromCache) != "" {
- t.Fatal("XFromCache header isn't blank")
- }
- }
-}
-
-func TestGetWithDoubleVary(t *testing.T) {
- resetTest()
- req, err := http.NewRequest("GET", s.server.URL+"/doublevary", nil)
- if err != nil {
- t.Fatal(err)
- }
- req.Header.Set("Accept", "text/plain")
- req.Header.Set("Accept-Language", "da, en-gb;q=0.8, en;q=0.7")
- {
- resp, err := s.client.Do(req)
- if err != nil {
- t.Fatal(err)
- }
- defer resp.Body.Close()
- if resp.Header.Get("Vary") == "" {
- t.Fatalf(`Vary header is blank`)
- }
- }
- {
- resp, err := s.client.Do(req)
- if err != nil {
- t.Fatal(err)
- }
- defer resp.Body.Close()
- if resp.Header.Get(XFromCache) != "1" {
- t.Fatalf(`XFromCache header isn't "1": %v`, resp.Header.Get(XFromCache))
- }
- }
- req.Header.Set("Accept-Language", "")
- {
- resp, err := s.client.Do(req)
- if err != nil {
- t.Fatal(err)
- }
- defer resp.Body.Close()
- if resp.Header.Get(XFromCache) != "" {
- t.Fatal("XFromCache header isn't blank")
- }
- }
- req.Header.Set("Accept-Language", "da")
- {
- resp, err := s.client.Do(req)
- if err != nil {
- t.Fatal(err)
- }
- defer resp.Body.Close()
- if resp.Header.Get(XFromCache) != "" {
- t.Fatal("XFromCache header isn't blank")
- }
- }
-}
-
-func TestGetWith2VaryHeaders(t *testing.T) {
- resetTest()
- // Tests that multiple Vary headers' comma-separated lists are
- // merged. See https://github.com/gregjones/httpcache/issues/27.
- const (
- accept = "text/plain"
- acceptLanguage = "da, en-gb;q=0.8, en;q=0.7"
- )
- req, err := http.NewRequest("GET", s.server.URL+"/2varyheaders", nil)
- if err != nil {
- t.Fatal(err)
- }
- req.Header.Set("Accept", accept)
- req.Header.Set("Accept-Language", acceptLanguage)
- {
- resp, err := s.client.Do(req)
- if err != nil {
- t.Fatal(err)
- }
- defer resp.Body.Close()
- if resp.Header.Get("Vary") == "" {
- t.Fatalf(`Vary header is blank`)
- }
- }
- {
- resp, err := s.client.Do(req)
- if err != nil {
- t.Fatal(err)
- }
- defer resp.Body.Close()
- if resp.Header.Get(XFromCache) != "1" {
- t.Fatalf(`XFromCache header isn't "1": %v`, resp.Header.Get(XFromCache))
- }
- }
- req.Header.Set("Accept-Language", "")
- {
- resp, err := s.client.Do(req)
- if err != nil {
- t.Fatal(err)
- }
- defer resp.Body.Close()
- if resp.Header.Get(XFromCache) != "" {
- t.Fatal("XFromCache header isn't blank")
- }
- }
- req.Header.Set("Accept-Language", "da")
- {
- resp, err := s.client.Do(req)
- if err != nil {
- t.Fatal(err)
- }
- defer resp.Body.Close()
- if resp.Header.Get(XFromCache) != "" {
- t.Fatal("XFromCache header isn't blank")
- }
- }
- req.Header.Set("Accept-Language", acceptLanguage)
- req.Header.Set("Accept", "")
- {
- resp, err := s.client.Do(req)
- if err != nil {
- t.Fatal(err)
- }
- defer resp.Body.Close()
- if resp.Header.Get(XFromCache) != "" {
- t.Fatal("XFromCache header isn't blank")
- }
- }
- req.Header.Set("Accept", "image/png")
- {
- resp, err := s.client.Do(req)
- if err != nil {
- t.Fatal(err)
- }
- defer resp.Body.Close()
- if resp.Header.Get(XFromCache) != "" {
- t.Fatal("XFromCache header isn't blank")
- }
- }
- {
- resp, err := s.client.Do(req)
- if err != nil {
- t.Fatal(err)
- }
- defer resp.Body.Close()
- if resp.Header.Get(XFromCache) != "1" {
- t.Fatalf(`XFromCache header isn't "1": %v`, resp.Header.Get(XFromCache))
- }
- }
-}
-
-func TestGetVaryUnused(t *testing.T) {
- resetTest()
- req, err := http.NewRequest("GET", s.server.URL+"/varyunused", nil)
- if err != nil {
- t.Fatal(err)
- }
- req.Header.Set("Accept", "text/plain")
- {
- resp, err := s.client.Do(req)
- if err != nil {
- t.Fatal(err)
- }
- defer resp.Body.Close()
- if resp.Header.Get("Vary") == "" {
- t.Fatalf(`Vary header is blank`)
- }
- }
- {
- resp, err := s.client.Do(req)
- if err != nil {
- t.Fatal(err)
- }
- defer resp.Body.Close()
- if resp.Header.Get(XFromCache) != "1" {
- t.Fatalf(`XFromCache header isn't "1": %v`, resp.Header.Get(XFromCache))
- }
- }
-}
-
-func TestUpdateFields(t *testing.T) {
- resetTest()
- req, err := http.NewRequest("GET", s.server.URL+"/updatefields", nil)
- if err != nil {
- t.Fatal(err)
- }
- var counter, counter2 string
- {
- resp, err := s.client.Do(req)
- if err != nil {
- t.Fatal(err)
- }
- defer resp.Body.Close()
- counter = resp.Header.Get("x-counter")
- }
- {
- resp, err := s.client.Do(req)
- if err != nil {
- t.Fatal(err)
- }
- defer resp.Body.Close()
- if resp.Header.Get(XFromCache) != "1" {
- t.Fatalf(`XFromCache header isn't "1": %v`, resp.Header.Get(XFromCache))
- }
- counter2 = resp.Header.Get("x-counter")
- }
- if counter == counter2 {
- t.Fatalf(`both "x-counter" values are equal: %v %v`, counter, counter2)
- }
-}
-
-func TestParseCacheControl(t *testing.T) {
- resetTest()
- h := http.Header{}
- for range parseCacheControl(h) {
- t.Fatal("cacheControl should be empty")
- }
-
- h.Set("cache-control", "no-cache")
- {
- cc := parseCacheControl(h)
- if _, ok := cc["foo"]; ok {
- t.Error(`Value "foo" shouldn't exist`)
- }
- noCache, ok := cc["no-cache"]
- if !ok {
- t.Fatalf(`"no-cache" value isn't set`)
- }
- if noCache != "" {
- t.Fatalf(`"no-cache" value isn't blank: %v`, noCache)
- }
- }
- h.Set("cache-control", "no-cache, max-age=3600")
- {
- cc := parseCacheControl(h)
- noCache, ok := cc["no-cache"]
- if !ok {
- t.Fatalf(`"no-cache" value isn't set`)
- }
- if noCache != "" {
- t.Fatalf(`"no-cache" value isn't blank: %v`, noCache)
- }
- if cc["max-age"] != "3600" {
- t.Fatalf(`"max-age" value isn't "3600": %v`, cc["max-age"])
- }
- }
-}
-
-func TestNoCacheRequestExpiration(t *testing.T) {
- resetTest()
- respHeaders := http.Header{}
- respHeaders.Set("Cache-Control", "max-age=7200")
-
- reqHeaders := http.Header{}
- reqHeaders.Set("Cache-Control", "no-cache")
- if getFreshness(respHeaders, reqHeaders) != transparent {
- t.Fatal("freshness isn't transparent")
- }
-}
-
-func TestNoCacheResponseExpiration(t *testing.T) {
- resetTest()
- respHeaders := http.Header{}
- respHeaders.Set("Cache-Control", "no-cache")
- respHeaders.Set("Expires", "Wed, 19 Apr 3000 11:43:00 GMT")
-
- reqHeaders := http.Header{}
- if getFreshness(respHeaders, reqHeaders) != stale {
- t.Fatal("freshness isn't stale")
- }
-}
-
-func TestReqMustRevalidate(t *testing.T) {
- resetTest()
- // not paying attention to request setting max-stale means never returning stale
- // responses, so always acting as if must-revalidate is set
- respHeaders := http.Header{}
-
- reqHeaders := http.Header{}
- reqHeaders.Set("Cache-Control", "must-revalidate")
- if getFreshness(respHeaders, reqHeaders) != stale {
- t.Fatal("freshness isn't stale")
- }
-}
-
-func TestRespMustRevalidate(t *testing.T) {
- resetTest()
- respHeaders := http.Header{}
- respHeaders.Set("Cache-Control", "must-revalidate")
-
- reqHeaders := http.Header{}
- if getFreshness(respHeaders, reqHeaders) != stale {
- t.Fatal("freshness isn't stale")
- }
-}
-
-func TestFreshExpiration(t *testing.T) {
- resetTest()
- now := time.Now()
- respHeaders := http.Header{}
- respHeaders.Set("date", now.Format(time.RFC1123))
- respHeaders.Set("expires", now.Add(time.Duration(2)*time.Second).Format(time.RFC1123))
-
- reqHeaders := http.Header{}
- if getFreshness(respHeaders, reqHeaders) != fresh {
- t.Fatal("freshness isn't fresh")
- }
-
- clock = &fakeClock{elapsed: 3 * time.Second}
- if getFreshness(respHeaders, reqHeaders) != stale {
- t.Fatal("freshness isn't stale")
- }
-}
-
-func TestMaxAge(t *testing.T) {
- resetTest()
- now := time.Now()
- respHeaders := http.Header{}
- respHeaders.Set("date", now.Format(time.RFC1123))
- respHeaders.Set("cache-control", "max-age=2")
-
- reqHeaders := http.Header{}
- if getFreshness(respHeaders, reqHeaders) != fresh {
- t.Fatal("freshness isn't fresh")
- }
-
- clock = &fakeClock{elapsed: 3 * time.Second}
- if getFreshness(respHeaders, reqHeaders) != stale {
- t.Fatal("freshness isn't stale")
- }
-}
-
-func TestMaxAgeZero(t *testing.T) {
- resetTest()
- now := time.Now()
- respHeaders := http.Header{}
- respHeaders.Set("date", now.Format(time.RFC1123))
- respHeaders.Set("cache-control", "max-age=0")
-
- reqHeaders := http.Header{}
- if getFreshness(respHeaders, reqHeaders) != stale {
- t.Fatal("freshness isn't stale")
- }
-}
-
-func TestBothMaxAge(t *testing.T) {
- resetTest()
- now := time.Now()
- respHeaders := http.Header{}
- respHeaders.Set("date", now.Format(time.RFC1123))
- respHeaders.Set("cache-control", "max-age=2")
-
- reqHeaders := http.Header{}
- reqHeaders.Set("cache-control", "max-age=0")
- if getFreshness(respHeaders, reqHeaders) != stale {
- t.Fatal("freshness isn't stale")
- }
-}
-
-func TestMinFreshWithExpires(t *testing.T) {
- resetTest()
- now := time.Now()
- respHeaders := http.Header{}
- respHeaders.Set("date", now.Format(time.RFC1123))
- respHeaders.Set("expires", now.Add(time.Duration(2)*time.Second).Format(time.RFC1123))
-
- reqHeaders := http.Header{}
- reqHeaders.Set("cache-control", "min-fresh=1")
- if getFreshness(respHeaders, reqHeaders) != fresh {
- t.Fatal("freshness isn't fresh")
- }
-
- reqHeaders = http.Header{}
- reqHeaders.Set("cache-control", "min-fresh=2")
- if getFreshness(respHeaders, reqHeaders) != stale {
- t.Fatal("freshness isn't stale")
- }
-}
-
-func TestEmptyMaxStale(t *testing.T) {
- resetTest()
- now := time.Now()
- respHeaders := http.Header{}
- respHeaders.Set("date", now.Format(time.RFC1123))
- respHeaders.Set("cache-control", "max-age=20")
-
- reqHeaders := http.Header{}
- reqHeaders.Set("cache-control", "max-stale")
- clock = &fakeClock{elapsed: 10 * time.Second}
- if getFreshness(respHeaders, reqHeaders) != fresh {
- t.Fatal("freshness isn't fresh")
- }
-
- clock = &fakeClock{elapsed: 60 * time.Second}
- if getFreshness(respHeaders, reqHeaders) != fresh {
- t.Fatal("freshness isn't fresh")
- }
-}
-
-func TestMaxStaleValue(t *testing.T) {
- resetTest()
- now := time.Now()
- respHeaders := http.Header{}
- respHeaders.Set("date", now.Format(time.RFC1123))
- respHeaders.Set("cache-control", "max-age=10")
-
- reqHeaders := http.Header{}
- reqHeaders.Set("cache-control", "max-stale=20")
- clock = &fakeClock{elapsed: 5 * time.Second}
- if getFreshness(respHeaders, reqHeaders) != fresh {
- t.Fatal("freshness isn't fresh")
- }
-
- clock = &fakeClock{elapsed: 15 * time.Second}
- if getFreshness(respHeaders, reqHeaders) != fresh {
- t.Fatal("freshness isn't fresh")
- }
-
- clock = &fakeClock{elapsed: 30 * time.Second}
- if getFreshness(respHeaders, reqHeaders) != stale {
- t.Fatal("freshness isn't stale")
- }
-}
-
-func containsHeader(headers []string, header string) bool {
- for _, v := range headers {
- if http.CanonicalHeaderKey(v) == http.CanonicalHeaderKey(header) {
- return true
- }
- }
- return false
-}
-
-func TestGetEndToEndHeaders(t *testing.T) {
- resetTest()
- var (
- headers http.Header
- end2end []string
- )
-
- headers = http.Header{}
- headers.Set("content-type", "text/html")
- headers.Set("te", "deflate")
-
- end2end = getEndToEndHeaders(headers)
- if !containsHeader(end2end, "content-type") {
- t.Fatal(`doesn't contain "content-type" header`)
- }
- if containsHeader(end2end, "te") {
- t.Fatal(`doesn't contain "te" header`)
- }
-
- headers = http.Header{}
- headers.Set("connection", "content-type")
- headers.Set("content-type", "text/csv")
- headers.Set("te", "deflate")
- end2end = getEndToEndHeaders(headers)
- if containsHeader(end2end, "connection") {
- t.Fatal(`doesn't contain "connection" header`)
- }
- if containsHeader(end2end, "content-type") {
- t.Fatal(`doesn't contain "content-type" header`)
- }
- if containsHeader(end2end, "te") {
- t.Fatal(`doesn't contain "te" header`)
- }
-
- headers = http.Header{}
- end2end = getEndToEndHeaders(headers)
- if len(end2end) != 0 {
- t.Fatal(`non-zero end2end headers`)
- }
-
- headers = http.Header{}
- headers.Set("connection", "content-type")
- end2end = getEndToEndHeaders(headers)
- if len(end2end) != 0 {
- t.Fatal(`non-zero end2end headers`)
- }
-}
-
-type transportMock struct {
- response *http.Response
- err error
-}
-
-func (t transportMock) RoundTrip(req *http.Request) (resp *http.Response, err error) {
- return t.response, t.err
-}
-
-func TestStaleIfErrorRequest(t *testing.T) {
- resetTest()
- now := time.Now()
- tmock := transportMock{
- response: &http.Response{
- Status: http.StatusText(http.StatusOK),
- StatusCode: http.StatusOK,
- Header: http.Header{
- "Date": []string{now.Format(time.RFC1123)},
- "Cache-Control": []string{"no-cache"},
- },
- Body: ioutil.NopCloser(bytes.NewBuffer([]byte("some data"))),
- },
- err: nil,
- }
- tp := NewMemoryCacheTransport()
- tp.Transport = &tmock
-
- // First time, response is cached on success
- r, _ := http.NewRequest("GET", "http://somewhere.com/", nil)
- r.Header.Set("Cache-Control", "stale-if-error")
- resp, err := tp.RoundTrip(r)
- if err != nil {
- t.Fatal(err)
- }
- if resp == nil {
- t.Fatal("resp is nil")
- }
-
- // On failure, response is returned from the cache
- tmock.response = nil
- tmock.err = errors.New("some error")
- resp, err = tp.RoundTrip(r)
- if err != nil {
- t.Fatal(err)
- }
- if resp == nil {
- t.Fatal("resp is nil")
- }
-}
-
-func TestStaleIfErrorRequestLifetime(t *testing.T) {
- resetTest()
- now := time.Now()
- tmock := transportMock{
- response: &http.Response{
- Status: http.StatusText(http.StatusOK),
- StatusCode: http.StatusOK,
- Header: http.Header{
- "Date": []string{now.Format(time.RFC1123)},
- "Cache-Control": []string{"no-cache"},
- },
- Body: ioutil.NopCloser(bytes.NewBuffer([]byte("some data"))),
- },
- err: nil,
- }
- tp := NewMemoryCacheTransport()
- tp.Transport = &tmock
-
- // First time, response is cached on success
- r, _ := http.NewRequest("GET", "http://somewhere.com/", nil)
- r.Header.Set("Cache-Control", "stale-if-error=100")
- resp, err := tp.RoundTrip(r)
- if err != nil {
- t.Fatal(err)
- }
- if resp == nil {
- t.Fatal("resp is nil")
- }
-
- // On failure, response is returned from the cache
- tmock.response = nil
- tmock.err = errors.New("some error")
- resp, err = tp.RoundTrip(r)
- if err != nil {
- t.Fatal(err)
- }
- if resp == nil {
- t.Fatal("resp is nil")
- }
-
- // Same for http errors
- tmock.response = &http.Response{StatusCode: http.StatusInternalServerError}
- tmock.err = nil
- resp, err = tp.RoundTrip(r)
- if err != nil {
- t.Fatal(err)
- }
- if resp == nil {
- t.Fatal("resp is nil")
- }
-
- // If failure last more than max stale, error is returned
- clock = &fakeClock{elapsed: 200 * time.Second}
- resp, err = tp.RoundTrip(r)
- if err != tmock.err {
- t.Fatalf("got err %v, want %v", err, tmock.err)
- }
-}
-
-func TestStaleIfErrorResponse(t *testing.T) {
- resetTest()
- now := time.Now()
- tmock := transportMock{
- response: &http.Response{
- Status: http.StatusText(http.StatusOK),
- StatusCode: http.StatusOK,
- Header: http.Header{
- "Date": []string{now.Format(time.RFC1123)},
- "Cache-Control": []string{"no-cache, stale-if-error"},
- },
- Body: ioutil.NopCloser(bytes.NewBuffer([]byte("some data"))),
- },
- err: nil,
- }
- tp := NewMemoryCacheTransport()
- tp.Transport = &tmock
-
- // First time, response is cached on success
- r, _ := http.NewRequest("GET", "http://somewhere.com/", nil)
- resp, err := tp.RoundTrip(r)
- if err != nil {
- t.Fatal(err)
- }
- if resp == nil {
- t.Fatal("resp is nil")
- }
-
- // On failure, response is returned from the cache
- tmock.response = nil
- tmock.err = errors.New("some error")
- resp, err = tp.RoundTrip(r)
- if err != nil {
- t.Fatal(err)
- }
- if resp == nil {
- t.Fatal("resp is nil")
- }
-}
-
-func TestStaleIfErrorResponseLifetime(t *testing.T) {
- resetTest()
- now := time.Now()
- tmock := transportMock{
- response: &http.Response{
- Status: http.StatusText(http.StatusOK),
- StatusCode: http.StatusOK,
- Header: http.Header{
- "Date": []string{now.Format(time.RFC1123)},
- "Cache-Control": []string{"no-cache, stale-if-error=100"},
- },
- Body: ioutil.NopCloser(bytes.NewBuffer([]byte("some data"))),
- },
- err: nil,
- }
- tp := NewMemoryCacheTransport()
- tp.Transport = &tmock
-
- // First time, response is cached on success
- r, _ := http.NewRequest("GET", "http://somewhere.com/", nil)
- resp, err := tp.RoundTrip(r)
- if err != nil {
- t.Fatal(err)
- }
- if resp == nil {
- t.Fatal("resp is nil")
- }
-
- // On failure, response is returned from the cache
- tmock.response = nil
- tmock.err = errors.New("some error")
- resp, err = tp.RoundTrip(r)
- if err != nil {
- t.Fatal(err)
- }
- if resp == nil {
- t.Fatal("resp is nil")
- }
-
- // If failure last more than max stale, error is returned
- clock = &fakeClock{elapsed: 200 * time.Second}
- resp, err = tp.RoundTrip(r)
- if err != tmock.err {
- t.Fatalf("got err %v, want %v", err, tmock.err)
- }
-}
diff --git a/vendor/src/github.com/gregjones/httpcache/leveldbcache/leveldbcache.go b/vendor/src/github.com/gregjones/httpcache/leveldbcache/leveldbcache.go
deleted file mode 100644
index 9bcb7e2..0000000
--- a/vendor/src/github.com/gregjones/httpcache/leveldbcache/leveldbcache.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Package leveldbcache provides an implementation of httpcache.Cache that
-// uses github.com/syndtr/goleveldb/leveldb
-package leveldbcache
-
-import (
- "github.com/syndtr/goleveldb/leveldb"
-)
-
-// Cache is an implementation of httpcache.Cache with leveldb storage
-type Cache struct {
- db *leveldb.DB
-}
-
-// Get returns the response corresponding to key if present
-func (c *Cache) Get(key string) (resp []byte, ok bool) {
- var err error
- resp, err = c.db.Get([]byte(key), nil)
- if err != nil {
- return []byte{}, false
- }
- return resp, true
-}
-
-// Set saves a response to the cache as key
-func (c *Cache) Set(key string, resp []byte) {
- c.db.Put([]byte(key), resp, nil)
-}
-
-// Delete removes the response with key from the cache
-func (c *Cache) Delete(key string) {
- c.db.Delete([]byte(key), nil)
-}
-
-// New returns a new Cache that will store leveldb in path
-func New(path string) (*Cache, error) {
- cache := &Cache{}
-
- var err error
- cache.db, err = leveldb.OpenFile(path, nil)
-
- if err != nil {
- return nil, err
- }
- return cache, nil
-}
-
-// NewWithDB returns a new Cache using the provided leveldb as underlying
-// storage.
-func NewWithDB(db *leveldb.DB) *Cache {
- return &Cache{db}
-}
diff --git a/vendor/src/github.com/gregjones/httpcache/leveldbcache/leveldbcache_test.go b/vendor/src/github.com/gregjones/httpcache/leveldbcache/leveldbcache_test.go
deleted file mode 100644
index b885c01..0000000
--- a/vendor/src/github.com/gregjones/httpcache/leveldbcache/leveldbcache_test.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package leveldbcache
-
-import (
- "bytes"
- "io/ioutil"
- "os"
- "path/filepath"
- "testing"
-)
-
-func TestDiskCache(t *testing.T) {
- tempDir, err := ioutil.TempDir("", "httpcache")
- if err != nil {
- t.Fatalf("TempDir: %v", err)
- }
- defer os.RemoveAll(tempDir)
-
- cache, err := New(filepath.Join(tempDir, "db"))
- if err != nil {
- t.Fatalf("New leveldb,: %v", err)
- }
-
- key := "testKey"
- _, ok := cache.Get(key)
- if ok {
- t.Fatal("retrieved key before adding it")
- }
-
- val := []byte("some bytes")
- cache.Set(key, val)
-
- retVal, ok := cache.Get(key)
- if !ok {
- t.Fatal("could not retrieve an element we just added")
- }
- if !bytes.Equal(retVal, val) {
- t.Fatal("retrieved a different value than what we put in")
- }
-
- cache.Delete(key)
-
- _, ok = cache.Get(key)
- if ok {
- t.Fatal("deleted key still present")
- }
-}
diff --git a/vendor/src/github.com/gregjones/httpcache/memcache/appengine.go b/vendor/src/github.com/gregjones/httpcache/memcache/appengine.go
deleted file mode 100644
index e68d9bc..0000000
--- a/vendor/src/github.com/gregjones/httpcache/memcache/appengine.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// +build appengine
-
-// Package memcache provides an implementation of httpcache.Cache that uses App
-// Engine's memcache package to store cached responses.
-//
-// When not built for Google App Engine, this package will provide an
-// implementation that connects to a specified memcached server. See the
-// memcache.go file in this package for details.
-package memcache
-
-import (
- "appengine"
- "appengine/memcache"
-)
-
-// Cache is an implementation of httpcache.Cache that caches responses in App
-// Engine's memcache.
-type Cache struct {
- appengine.Context
-}
-
-// cacheKey modifies an httpcache key for use in memcache. Specifically, it
-// prefixes keys to avoid collision with other data stored in memcache.
-func cacheKey(key string) string {
- return "httpcache:" + key
-}
-
-// Get returns the response corresponding to key if present.
-func (c *Cache) Get(key string) (resp []byte, ok bool) {
- item, err := memcache.Get(c.Context, cacheKey(key))
- if err != nil {
- if err != memcache.ErrCacheMiss {
- c.Context.Errorf("error getting cached response: %v", err)
- }
- return nil, false
- }
- return item.Value, true
-}
-
-// Set saves a response to the cache as key.
-func (c *Cache) Set(key string, resp []byte) {
- item := &memcache.Item{
- Key: cacheKey(key),
- Value: resp,
- }
- if err := memcache.Set(c.Context, item); err != nil {
- c.Context.Errorf("error caching response: %v", err)
- }
-}
-
-// Delete removes the response with key from the cache.
-func (c *Cache) Delete(key string) {
- if err := memcache.Delete(c.Context, cacheKey(key)); err != nil {
- c.Context.Errorf("error deleting cached response: %v", err)
- }
-}
-
-// New returns a new Cache for the given context.
-func New(ctx appengine.Context) *Cache {
- return &Cache{ctx}
-}
diff --git a/vendor/src/github.com/gregjones/httpcache/memcache/appengine_test.go b/vendor/src/github.com/gregjones/httpcache/memcache/appengine_test.go
deleted file mode 100644
index 818b277..0000000
--- a/vendor/src/github.com/gregjones/httpcache/memcache/appengine_test.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// +build appengine
-
-package memcache
-
-import (
- "bytes"
- "testing"
-
- "appengine/aetest"
-)
-
-func TestAppEngine(t *testing.T) {
- ctx, err := aetest.NewContext(nil)
- if err != nil {
- t.Fatal(err)
- }
- defer ctx.Close()
-
- cache := New(ctx)
-
- key := "testKey"
- _, ok := cache.Get(key)
- if ok {
- t.Fatal("retrieved key before adding it")
- }
-
- val := []byte("some bytes")
- cache.Set(key, val)
-
- retVal, ok := cache.Get(key)
- if !ok {
- t.Fatal("could not retrieve an element we just added")
- }
- if !bytes.Equal(retVal, val) {
- t.Fatal("retrieved a different value than what we put in")
- }
-
- cache.Delete(key)
-
- _, ok = cache.Get(key)
- if ok {
- t.Fatal("deleted key still present")
- }
-}
diff --git a/vendor/src/github.com/gregjones/httpcache/memcache/memcache.go b/vendor/src/github.com/gregjones/httpcache/memcache/memcache.go
deleted file mode 100644
index 462f0e5..0000000
--- a/vendor/src/github.com/gregjones/httpcache/memcache/memcache.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// +build !appengine
-
-// Package memcache provides an implementation of httpcache.Cache that uses
-// gomemcache to store cached responses.
-//
-// When built for Google App Engine, this package will provide an
-// implementation that uses App Engine's memcache service. See the
-// appengine.go file in this package for details.
-package memcache
-
-import (
- "github.com/bradfitz/gomemcache/memcache"
-)
-
-// Cache is an implementation of httpcache.Cache that caches responses in a
-// memcache server.
-type Cache struct {
- *memcache.Client
-}
-
-// cacheKey modifies an httpcache key for use in memcache. Specifically, it
-// prefixes keys to avoid collision with other data stored in memcache.
-func cacheKey(key string) string {
- return "httpcache:" + key
-}
-
-// Get returns the response corresponding to key if present.
-func (c *Cache) Get(key string) (resp []byte, ok bool) {
- item, err := c.Client.Get(cacheKey(key))
- if err != nil {
- return nil, false
- }
- return item.Value, true
-}
-
-// Set saves a response to the cache as key.
-func (c *Cache) Set(key string, resp []byte) {
- item := &memcache.Item{
- Key: cacheKey(key),
- Value: resp,
- }
- c.Client.Set(item)
-}
-
-// Delete removes the response with key from the cache.
-func (c *Cache) Delete(key string) {
- c.Client.Delete(cacheKey(key))
-}
-
-// New returns a new Cache using the provided memcache server(s) with equal
-// weight. If a server is listed multiple times, it gets a proportional amount
-// of weight.
-func New(server ...string) *Cache {
- return NewWithClient(memcache.New(server...))
-}
-
-// NewWithClient returns a new Cache with the given memcache client.
-func NewWithClient(client *memcache.Client) *Cache {
- return &Cache{client}
-}
diff --git a/vendor/src/github.com/gregjones/httpcache/memcache/memcache_test.go b/vendor/src/github.com/gregjones/httpcache/memcache/memcache_test.go
deleted file mode 100644
index 4dcc547..0000000
--- a/vendor/src/github.com/gregjones/httpcache/memcache/memcache_test.go
+++ /dev/null
@@ -1,47 +0,0 @@
-// +build !appengine
-
-package memcache
-
-import (
- "bytes"
- "net"
- "testing"
-)
-
-const testServer = "localhost:11211"
-
-func TestMemCache(t *testing.T) {
- conn, err := net.Dial("tcp", testServer)
- if err != nil {
- // TODO: rather than skip the test, fall back to a faked memcached server
- t.Skipf("skipping test; no server running at %s", testServer)
- }
- conn.Write([]byte("flush_all\r\n")) // flush memcache
- conn.Close()
-
- cache := New(testServer)
-
- key := "testKey"
- _, ok := cache.Get(key)
- if ok {
- t.Fatal("retrieved key before adding it")
- }
-
- val := []byte("some bytes")
- cache.Set(key, val)
-
- retVal, ok := cache.Get(key)
- if !ok {
- t.Fatal("could not retrieve an element we just added")
- }
- if !bytes.Equal(retVal, val) {
- t.Fatal("retrieved a different value than what we put in")
- }
-
- cache.Delete(key)
-
- _, ok = cache.Get(key)
- if ok {
- t.Fatal("deleted key still present")
- }
-}
diff --git a/vendor/src/github.com/gregjones/httpcache/redis/redis.go b/vendor/src/github.com/gregjones/httpcache/redis/redis.go
deleted file mode 100644
index 3143d44..0000000
--- a/vendor/src/github.com/gregjones/httpcache/redis/redis.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// Package redis provides a redis interface for http caching.
-package redis
-
-import (
- "github.com/garyburd/redigo/redis"
- "github.com/gregjones/httpcache"
-)
-
-// cache is an implementation of httpcache.Cache that caches responses in a
-// redis server.
-type cache struct {
- redis.Conn
-}
-
-// cacheKey modifies an httpcache key for use in redis. Specifically, it
-// prefixes keys to avoid collision with other data stored in redis.
-func cacheKey(key string) string {
- return "rediscache:" + key
-}
-
-// Get returns the response corresponding to key if present.
-func (c cache) Get(key string) (resp []byte, ok bool) {
- item, err := redis.Bytes(c.Do("GET", cacheKey(key)))
- if err != nil {
- return nil, false
- }
- return item, true
-}
-
-// Set saves a response to the cache as key.
-func (c cache) Set(key string, resp []byte) {
- c.Do("SET", cacheKey(key), resp)
-}
-
-// Delete removes the response with key from the cache.
-func (c cache) Delete(key string) {
- c.Do("DEL", cacheKey(key))
-}
-
-// NewWithClient returns a new Cache with the given redis connection.
-func NewWithClient(client redis.Conn) httpcache.Cache {
- return cache{client}
-}
diff --git a/vendor/src/github.com/gregjones/httpcache/redis/redis_test.go b/vendor/src/github.com/gregjones/httpcache/redis/redis_test.go
deleted file mode 100644
index 72f6f61..0000000
--- a/vendor/src/github.com/gregjones/httpcache/redis/redis_test.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package redis
-
-import (
- "bytes"
- "testing"
-
- "github.com/garyburd/redigo/redis"
-)
-
-func TestRedisCache(t *testing.T) {
- conn, err := redis.Dial("tcp", "localhost:6379")
- if err != nil {
- // TODO: rather than skip the test, fall back to a faked redis server
- t.Skipf("skipping test; no server running at localhost:6379")
- }
- conn.Do("FLUSHALL")
-
- cache := NewWithClient(conn)
-
- key := "testKey"
- _, ok := cache.Get(key)
- if ok {
- t.Fatal("retrieved key before adding it")
- }
-
- val := []byte("some bytes")
- cache.Set(key, val)
-
- retVal, ok := cache.Get(key)
- if !ok {
- t.Fatal("could not retrieve an element we just added")
- }
- if !bytes.Equal(retVal, val) {
- t.Fatal("retrieved a different value than what we put in")
- }
-
- cache.Delete(key)
-
- _, ok = cache.Get(key)
- if ok {
- t.Fatal("deleted key still present")
- }
-}
diff --git a/vendor/src/github.com/jaytaylor/html2text/LICENSE b/vendor/src/github.com/jaytaylor/html2text/LICENSE
deleted file mode 100644
index 24dc4ab..0000000
--- a/vendor/src/github.com/jaytaylor/html2text/LICENSE
+++ /dev/null
@@ -1,22 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2015 Jay Taylor
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
diff --git a/vendor/src/github.com/jaytaylor/html2text/README.md b/vendor/src/github.com/jaytaylor/html2text/README.md
deleted file mode 100644
index ac11247..0000000
--- a/vendor/src/github.com/jaytaylor/html2text/README.md
+++ /dev/null
@@ -1,116 +0,0 @@
-# html2text
-
-[![Documentation](https://godoc.org/github.com/jaytaylor/html2text?status.svg)](https://godoc.org/github.com/jaytaylor/html2text)
-[![Build Status](https://travis-ci.org/jaytaylor/html2text.svg?branch=master)](https://travis-ci.org/jaytaylor/html2text)
-[![Report Card](https://goreportcard.com/badge/github.com/jaytaylor/html2text)](https://goreportcard.com/report/github.com/jaytaylor/html2text)
-
-### Converts HTML into text
-
-
-## Introduction
-
-Ensure your emails are readable by all!
-
-Turns HTML into raw text, useful for sending fancy HTML emails with a equivalently nicely formatted TXT document as a fallback (e.g. for people who don't allow HTML emails or have other display issues).
-
-html2text is a simple golang package for rendering HTML into plaintext.
-
-There are still lots of improvements to be had, but FWIW this has worked fine for my [basic] HTML-2-text needs.
-
-It requires go 1.x or newer ;)
-
-
-## Download the package
-
-```bash
-go get github.com/jaytaylor/html2text
-```
-
-## Example usage
-
-```go
-package main
-
-import (
- "fmt"
-
- "github.com/jaytaylor/html2text"
-)
-
-func main() {
- inputHtml := `
-
-
- My Mega Service
-
-
-
-
-
-
-
-
-
- Welcome to your new account on my service!
-
-
- Here is some more information:
-
-
-
-
-
- `
-
- text, err := html2text.FromString(inputHtml)
- if err != nil {
- panic(err)
- }
- fmt.Println(text)
-}
-```
-
-Output:
-```
-Mega Service ( http://mymegaservice.com/ )
-
-******************************************
-Welcome to your new account on my service!
-******************************************
-
-Here is some more information:
-
-* Link 1: Example.com ( https://example.com )
-* Link 2: Example2.com ( https://example2.com )
-* Something else
-```
-
-
-## Unit-tests
-
-Running the unit-tests is straightforward and standard:
-
-```bash
-go test
-```
-
-
-# License
-
-Permissive MIT license.
-
-
-## Contact
-
-You are more than welcome to open issues and send pull requests if you find a bug or want a new feature.
-
-If you appreciate this library please feel free to drop me a line and tell me! It's always nice to hear from people who have benefitted from my work.
-
-Email: jay at (my github username).com
-
-Twitter: [@jtaylor](https://twitter.com/jtaylor)
-
diff --git a/vendor/src/github.com/jaytaylor/html2text/html2text.go b/vendor/src/github.com/jaytaylor/html2text/html2text.go
deleted file mode 100644
index 8933fbe..0000000
--- a/vendor/src/github.com/jaytaylor/html2text/html2text.go
+++ /dev/null
@@ -1,304 +0,0 @@
-package html2text
-
-import (
- "bytes"
- "io"
- "regexp"
- "strings"
- "unicode"
-
- "golang.org/x/net/html"
- "golang.org/x/net/html/atom"
-)
-
-var (
- spacingRe = regexp.MustCompile(`[ \r\n\t]+`)
- newlineRe = regexp.MustCompile(`\n\n+`)
-)
-
-type textifyTraverseCtx struct {
- Buf bytes.Buffer
-
- prefix string
- blockquoteLevel int
- lineLength int
- endsWithSpace bool
- endsWithNewline bool
- justClosedDiv bool
-}
-
-func (ctx *textifyTraverseCtx) traverse(node *html.Node) error {
- switch node.Type {
-
- default:
- return ctx.traverseChildren(node)
-
- case html.TextNode:
- data := strings.Trim(spacingRe.ReplaceAllString(node.Data, " "), " ")
- return ctx.emit(data)
-
- case html.ElementNode:
-
- ctx.justClosedDiv = false
- switch node.DataAtom {
- case atom.Br:
- return ctx.emit("\n")
-
- case atom.H1, atom.H2, atom.H3:
- subCtx := textifyTraverseCtx{}
- if err := subCtx.traverseChildren(node); err != nil {
- return err
- }
-
- str := subCtx.Buf.String()
- dividerLen := 0
- for _, line := range strings.Split(str, "\n") {
- if lineLen := len([]rune(line)); lineLen-1 > dividerLen {
- dividerLen = lineLen - 1
- }
- }
- divider := ""
- if node.DataAtom == atom.H1 {
- divider = strings.Repeat("*", dividerLen)
- } else {
- divider = strings.Repeat("-", dividerLen)
- }
-
- if node.DataAtom == atom.H3 {
- return ctx.emit("\n\n" + str + "\n" + divider + "\n\n")
- }
- return ctx.emit("\n\n" + divider + "\n" + str + "\n" + divider + "\n\n")
-
- case atom.Blockquote:
- ctx.blockquoteLevel++
- ctx.prefix = strings.Repeat(">", ctx.blockquoteLevel) + " "
- if err := ctx.emit("\n"); err != nil {
- return err
- }
- if ctx.blockquoteLevel == 1 {
- if err := ctx.emit("\n"); err != nil {
- return err
- }
- }
- if err := ctx.traverseChildren(node); err != nil {
- return err
- }
- ctx.blockquoteLevel--
- ctx.prefix = strings.Repeat(">", ctx.blockquoteLevel)
- if ctx.blockquoteLevel > 0 {
- ctx.prefix += " "
- }
- return ctx.emit("\n\n")
-
- case atom.Div:
- if ctx.lineLength > 0 {
- if err := ctx.emit("\n"); err != nil {
- return err
- }
- }
- if err := ctx.traverseChildren(node); err != nil {
- return err
- }
- var err error
- if ctx.justClosedDiv == false {
- err = ctx.emit("\n")
- }
- ctx.justClosedDiv = true
- return err
-
- case atom.Li:
- if err := ctx.emit("* "); err != nil {
- return err
- }
-
- if err := ctx.traverseChildren(node); err != nil {
- return err
- }
-
- return ctx.emit("\n")
-
- case atom.B, atom.Strong:
- subCtx := textifyTraverseCtx{}
- subCtx.endsWithSpace = true
- if err := subCtx.traverseChildren(node); err != nil {
- return err
- }
- str := subCtx.Buf.String()
- return ctx.emit("*" + str + "*")
-
- case atom.A:
- // If image is the only child, take its alt text as the link text
- if img := node.FirstChild; img != nil && node.LastChild == img && img.DataAtom == atom.Img {
- if altText := getAttrVal(img, "alt"); altText != "" {
- ctx.emit(altText)
- }
- } else if err := ctx.traverseChildren(node); err != nil {
- return err
- }
-
- hrefLink := ""
- if attrVal := getAttrVal(node, "href"); attrVal != "" {
- attrVal = ctx.normalizeHrefLink(attrVal)
- if attrVal != "" {
- hrefLink = "( " + attrVal + " )"
- }
- }
-
- return ctx.emit(hrefLink)
-
- case atom.P, atom.Ul, atom.Table:
- if err := ctx.emit("\n\n"); err != nil {
- return err
- }
-
- if err := ctx.traverseChildren(node); err != nil {
- return err
- }
-
- return ctx.emit("\n\n")
-
- case atom.Tr:
- if err := ctx.traverseChildren(node); err != nil {
- return err
- }
-
- return ctx.emit("\n")
-
- case atom.Style, atom.Script, atom.Head:
- // Ignore the subtree
- return nil
-
- default:
- return ctx.traverseChildren(node)
- }
- }
-}
-
-func (ctx *textifyTraverseCtx) traverseChildren(node *html.Node) error {
- for c := node.FirstChild; c != nil; c = c.NextSibling {
- if err := ctx.traverse(c); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (ctx *textifyTraverseCtx) emit(data string) error {
- if len(data) == 0 {
- return nil
- }
- lines := ctx.breakLongLines(data)
- var err error
- for _, line := range lines {
- runes := []rune(line)
- startsWithSpace := unicode.IsSpace(runes[0])
- if !startsWithSpace && !ctx.endsWithSpace {
- ctx.Buf.WriteByte(' ')
- ctx.lineLength++
- }
- ctx.endsWithSpace = unicode.IsSpace(runes[len(runes)-1])
- for _, c := range line {
- _, err = ctx.Buf.WriteString(string(c))
- if err != nil {
- return err
- }
- ctx.lineLength++
- if c == '\n' {
- ctx.lineLength = 0
- if ctx.prefix != "" {
- _, err = ctx.Buf.WriteString(ctx.prefix)
- if err != nil {
- return err
- }
- }
- }
- }
- }
- return nil
-}
-
-func (ctx *textifyTraverseCtx) breakLongLines(data string) []string {
- // only break lines when we are in blockquotes
- if ctx.blockquoteLevel == 0 {
- return []string{data}
- }
- var ret []string
- runes := []rune(data)
- l := len(runes)
- existing := ctx.lineLength
- if existing >= 74 {
- ret = append(ret, "\n")
- existing = 0
- }
- for l+existing > 74 {
- i := 74 - existing
- for i >= 0 && !unicode.IsSpace(runes[i]) {
- i--
- }
- if i == -1 {
- // no spaces, so go the other way
- i = 74 - existing
- for i < l && !unicode.IsSpace(runes[i]) {
- i++
- }
- }
- ret = append(ret, string(runes[:i])+"\n")
- for i < l && unicode.IsSpace(runes[i]) {
- i++
- }
- runes = runes[i:]
- l = len(runes)
- existing = 0
- }
- if len(runes) > 0 {
- ret = append(ret, string(runes))
- }
- return ret
-}
-
-func (ctx *textifyTraverseCtx) normalizeHrefLink(link string) string {
- link = strings.TrimSpace(link)
- link = strings.TrimPrefix(link, "mailto:")
- return link
-}
-
-func getAttrVal(node *html.Node, attrName string) string {
- for _, attr := range node.Attr {
- if attr.Key == attrName {
- return attr.Val
- }
- }
-
- return ""
-}
-
-func FromHtmlNode(doc *html.Node) (string, error) {
- ctx := textifyTraverseCtx{
- Buf: bytes.Buffer{},
- }
- if err := ctx.traverse(doc); err != nil {
- return "", err
- }
-
- text := strings.TrimSpace(newlineRe.ReplaceAllString(
- strings.Replace(ctx.Buf.String(), "\n ", "\n", -1), "\n\n"))
- return text, nil
-
-}
-
-func FromReader(reader io.Reader) (string, error) {
- doc, err := html.Parse(reader)
- if err != nil {
- return "", err
- }
- return FromHtmlNode(doc)
-}
-
-func FromString(input string) (string, error) {
- text, err := FromReader(strings.NewReader(input))
- if err != nil {
- return "", err
- }
- return text, nil
-}
diff --git a/vendor/src/github.com/jaytaylor/html2text/html2text_test.go b/vendor/src/github.com/jaytaylor/html2text/html2text_test.go
deleted file mode 100644
index c55aa3e..0000000
--- a/vendor/src/github.com/jaytaylor/html2text/html2text_test.go
+++ /dev/null
@@ -1,630 +0,0 @@
-package html2text
-
-import (
- "fmt"
- "regexp"
- "testing"
-)
-
-func TestStrippingWhitespace(t *testing.T) {
- testCases := []struct {
- input string
- output string
- }{
- {
- "test text",
- "test text",
- },
- {
- " \ttext\ntext\n",
- "text text",
- },
- {
- " \na \n\t \n \n a \t",
- "a a",
- },
- {
- "test text",
- "test text",
- },
- {
- "test text ",
- "test   text",
- },
- }
-
- for _, testCase := range testCases {
- assertString(t, testCase.input, testCase.output)
- }
-}
-
-func TestParagraphsAndBreaks(t *testing.T) {
- testCases := []struct {
- input string
- output string
- }{
- {
- "Test text",
- "Test text",
- },
- {
- "Test text
",
- "Test text",
- },
- {
- "Test text
Test",
- "Test text\nTest",
- },
- {
- "
Test text
",
- "Test text",
- },
- {
- "
Test text
Test text
",
- "Test text\n\nTest text",
- },
- {
- "\n
Test text
\n\n\n\t
Test text
\n",
- "Test text\n\nTest text",
- },
- {
- "\n
Test text Test text
\n",
- "Test text\nTest text",
- },
- {
- "\n
Test text \tTest text
\n",
- "Test text\nTest text",
- },
- {
- "Test text
Test text",
- "Test text\n\nTest text",
- },
- }
-
- for _, testCase := range testCases {
- assertString(t, testCase.input, testCase.output)
- }
-}
-
-func TestTables(t *testing.T) {
- testCases := []struct {
- input string
- output string
- }{
- {
- "
",
- "",
- },
- {
- "
",
- "cell1 cell2",
- },
- {
- "
",
- "row1\nrow2",
- },
- {
- `
- cell1-1 cell1-2
- cell2-1 cell2-2
-
`,
- "cell1-1 cell1-2\ncell2-1 cell2-2",
- },
- {
- "_
_",
- "_\n\ncell\n\n_",
- },
- }
-
- for _, testCase := range testCases {
- assertString(t, testCase.input, testCase.output)
- }
-}
-
-func TestStrippingLists(t *testing.T) {
- testCases := []struct {
- input string
- output string
- }{
- {
- "
",
- "",
- },
- {
- "
_",
- "* item\n\n_",
- },
- {
- "
item 1 item 2 \n_",
- "* item 1\n* item 2\n_",
- },
- {
- "
item 1 \t\n
item 2 item 3 \n_",
- "* item 1\n* item 2\n* item 3\n_",
- },
- }
-
- for _, testCase := range testCases {
- assertString(t, testCase.input, testCase.output)
- }
-}
-
-func TestLinks(t *testing.T) {
- testCases := []struct {
- input string
- output string
- }{
- {
- `
`,
- ``,
- },
- {
- `
`,
- ``,
- },
- {
- `
`,
- `( http://example.com/ )`,
- },
- {
- `
Link `,
- `Link`,
- },
- {
- `
Link `,
- `Link ( http://example.com/ )`,
- },
- {
- `
Link `,
- `Link ( http://example.com/ )`,
- },
- {
- "
\n\tLink \n\t ",
- `Link ( http://example.com/ )`,
- },
- {
- "
Contact Us ",
- `Contact Us ( contact@example.org )`,
- },
- {
- "
Link ",
- `Link ( http://example.com:80/~user?aaa=bb&c=d,e,f#foo )`,
- },
- {
- "
Link ",
- `Link ( http://example.com/ )`,
- },
- {
- "
Link ",
- `Link ( http://example.com/ )`,
- },
- {
- "
Link A Link B ",
- `Link A ( http://example.com/a/ ) Link B ( http://example.com/b/ )`,
- },
- {
- "
Link ",
- `Link ( %%LINK%% )`,
- },
- {
- "
Link ",
- `Link ( [LINK] )`,
- },
- {
- "
Link ",
- `Link ( {LINK} )`,
- },
- {
- "
Link ",
- `Link ( [[!unsubscribe]] )`,
- },
- {
- "
This is link1 and link2 is next.
",
- `This is link1 ( http://www.google.com ) and link2 ( http://www.google.com ) is next.`,
- },
- }
-
- for _, testCase := range testCases {
- assertString(t, testCase.input, testCase.output)
- }
-}
-
-func TestImageAltTags(t *testing.T) {
- testCases := []struct {
- input string
- output string
- }{
- {
- `
`,
- ``,
- },
- {
- `
`,
- ``,
- },
- {
- `
`,
- ``,
- },
- {
- `
`,
- ``,
- },
- // Images do matter if they are in a link
- {
- `
`,
- `Example ( http://example.com/ )`,
- },
- {
- `
`,
- `Example ( http://example.com/ )`,
- },
- {
- `
`,
- `Example ( http://example.com/ )`,
- },
- {
- `
`,
- `Example ( http://example.com/ )`,
- },
- }
-
- for _, testCase := range testCases {
- assertString(t, testCase.input, testCase.output)
- }
-}
-
-func TestHeadings(t *testing.T) {
- testCases := []struct {
- input string
- output string
- }{
- {
- "
Test ",
- "****\nTest\n****",
- },
- {
- "\t
\nTest ",
- "****\nTest\n****",
- },
- {
- "\t
\nTest line 1 Test 2 ",
- "***********\nTest line 1\nTest 2\n***********",
- },
- {
- "
Test Test ",
- "****\nTest\n****\n\n****\nTest\n****",
- },
- {
- "
Test ",
- "----\nTest\n----",
- },
- {
- "
",
- "****************************\nTest ( http://example.com/ )\n****************************",
- },
- {
- "
Test ",
- "Test\n----",
- },
- }
-
- for _, testCase := range testCases {
- assertString(t, testCase.input, testCase.output)
- }
-
-}
-
-func TestBold(t *testing.T) {
- testCases := []struct {
- input string
- output string
- }{
- {
- "
Test ",
- "*Test*",
- },
- {
- "\t
Test ",
- "*Test*",
- },
- {
- "\t
Test line 1 Test 2 ",
- "*Test line 1\nTest 2*",
- },
- {
- "
Test Test ",
- "*Test* *Test*",
- },
- }
-
- for _, testCase := range testCases {
- assertString(t, testCase.input, testCase.output)
- }
-
-}
-
-func TestDiv(t *testing.T) {
- testCases := []struct {
- input string
- output string
- }{
- {
- "
Test
",
- "Test",
- },
- {
- "\t
Test
",
- "Test",
- },
- {
- "
",
- "Test line 1\nTest 2",
- },
- {
- "Test 1
Test 2
Test 3
Test 4",
- "Test 1\nTest 2\nTest 3\nTest 4",
- },
- }
-
- for _, testCase := range testCases {
- assertString(t, testCase.input, testCase.output)
- }
-
-}
-
-func TestBlockquotes(t *testing.T) {
- testCases := []struct {
- input string
- output string
- }{
- {
- "
level 0
level 1level 2 level 1 level 0
",
- "level 0\n> \n> level 1\n> \n>> level 2\n> \n> level 1\n\nlevel 0",
- },
- {
- "
Test Test",
- "> \n> Test\n\nTest",
- },
- {
- "\t
\nTest ",
- "> \n> Test\n>",
- },
- {
- "\t
\nTest line 1 Test 2 ",
- "> \n> Test line 1\n> Test 2",
- },
- {
- "
Test Test Other Test",
- "> \n> Test\n\n> \n> Test\n\nOther Test",
- },
- {
- "
Lorem ipsum Commodo id consectetur pariatur ea occaecat minim aliqua ad sit consequat quis ex commodo Duis incididunt eu mollit consectetur fugiat voluptate dolore in pariatur in commodo occaecat Ut occaecat velit esse labore aute quis commodo non sit dolore officia Excepteur cillum amet cupidatat culpa velit labore ullamco dolore mollit elit in aliqua dolor irure do ",
- "> \n> Lorem ipsum Commodo id consectetur pariatur ea occaecat minim aliqua ad\n> sit consequat quis ex commodo Duis incididunt eu mollit consectetur fugiat\n> voluptate dolore in pariatur in commodo occaecat Ut occaecat velit esse\n> labore aute quis commodo non sit dolore officia Excepteur cillum amet\n> cupidatat culpa velit labore ullamco dolore mollit elit in aliqua dolor\n> irure do",
- },
- {
- "
Loremipsum Commodo id consectetur pariatur ea occaecat minim aliqua ad sit consequat quis ex commodo Duis incididunt eu mollit consectetur fugiat voluptate dolore in pariatur in commodo occaecat Ut occaecat velit esse labore aute quis commodo non sit dolore officia Excepteur cillum amet cupidatat culpa velit labore ullamco dolore mollit elit in aliqua dolor irure do ",
- "> \n> Lorem *ipsum* *Commodo* *id* *consectetur* *pariatur* *ea* *occaecat* *minim*\n> *aliqua* *ad* *sit* *consequat* *quis* *ex* *commodo* *Duis* *incididunt* *eu*\n> *mollit* *consectetur* *fugiat* *voluptate* *dolore* *in* *pariatur* *in* *commodo*\n> *occaecat* *Ut* *occaecat* *velit* *esse* *labore* *aute* *quis* *commodo*\n> *non* *sit* *dolore* *officia* *Excepteur* *cillum* *amet* *cupidatat* *culpa*\n> *velit* *labore* *ullamco* *dolore* *mollit* *elit* *in* *aliqua* *dolor* *irure*\n> *do*",
- },
- }
-
- for _, testCase := range testCases {
- assertString(t, testCase.input, testCase.output)
- }
-
-}
-
-func TestIgnoreStylesScriptsHead(t *testing.T) {
- testCases := []struct {
- input string
- output string
- }{
- {
- "",
- "",
- },
- {
- "",
- "",
- },
- {
- "
",
- "",
- },
- {
- "",
- "",
- },
- {
- "",
- "",
- },
- {
- "",
- "",
- },
- {
- "",
- "",
- },
- {
- "",
- "",
- },
- {
- "",
- "",
- },
- {
- `
Title `,
- "",
- },
- }
-
- for _, testCase := range testCases {
- assertString(t, testCase.input, testCase.output)
- }
-}
-
-func TestText(t *testing.T) {
- testCases := []struct {
- input string
- expr string
- }{
- {
- `
- New repository
- `,
- `\* New repository \( /new \)`,
- },
- {
- `hi
-
-
-
- hello
google
-
- test
List:
-
-
-`,
- `hi
-hello google \( https://google.com \)
-
-test
-
-List:
-
-\* Foo \( foo \)
-\* Barsoap \( http://www.microshwhat.com/bar/soapy \)
-\* Baz`,
- },
- // Malformed input html.
- {
- `hi
-
- hello
google
-
- test
List:
-
-
- `,
- `hi hello google \( https://google.com \) test
-
-List:
-
-\* Foo \( foo \)
-\* Bar \( /\n[ \t]+bar/baz \)
-\* Baz`,
- },
- }
-
- for _, testCase := range testCases {
- assertRegexp(t, testCase.input, testCase.expr)
- }
-}
-
-type StringMatcher interface {
- MatchString(string) bool
- String() string
-}
-
-type RegexpStringMatcher string
-
-func (m RegexpStringMatcher) MatchString(str string) bool {
- return regexp.MustCompile(string(m)).MatchString(str)
-}
-func (m RegexpStringMatcher) String() string {
- return string(m)
-}
-
-type ExactStringMatcher string
-
-func (m ExactStringMatcher) MatchString(str string) bool {
- return string(m) == str
-}
-func (m ExactStringMatcher) String() string {
- return string(m)
-}
-
-func assertRegexp(t *testing.T, input string, outputRE string) {
- assertPlaintext(t, input, RegexpStringMatcher(outputRE))
-}
-
-func assertString(t *testing.T, input string, output string) {
- assertPlaintext(t, input, ExactStringMatcher(output))
-}
-
-func assertPlaintext(t *testing.T, input string, matcher StringMatcher) {
- text, err := FromString(input)
- if err != nil {
- t.Error(err)
- }
- if !matcher.MatchString(text) {
- t.Errorf("Input did not match expression\n"+
- "Input:\n>>>>\n%s\n<<<<\n\n"+
- "Output:\n>>>>\n%s\n<<<<\n\n"+
- "Expected output:\n>>>>\n%s\n<<<<\n\n",
- input, text, matcher.String())
- } else {
- t.Logf("input:\n\n%s\n\n\n\noutput:\n\n%s\n", input, text)
- }
-}
-
-func Example() {
- inputHtml := `
-
-
-
My Mega Service
-
-
-
-
-
-
-
-
-
-
Welcome to your new account on my service!
-
-
- Here is some more information:
-
-
-
-
-
- `
-
- text, err := FromString(inputHtml)
- if err != nil {
- panic(err)
- }
- fmt.Println(text)
-
- // Output:
- // Mega Service ( http://mymegaservice.com/ )
- //
- // ******************************************
- // Welcome to your new account on my service!
- // ******************************************
- //
- // Here is some more information:
- //
- // * Link 1: Example.com ( https://example.com )
- // * Link 2: Example2.com ( https://example2.com )
- // * Something else
-}
diff --git a/vendor/src/github.com/matrix-org/dugong/LICENSE b/vendor/src/github.com/matrix-org/dugong/LICENSE
deleted file mode 100644
index 8dada3e..0000000
--- a/vendor/src/github.com/matrix-org/dugong/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "{}"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright {yyyy} {name of copyright owner}
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/src/github.com/matrix-org/dugong/README.md b/vendor/src/github.com/matrix-org/dugong/README.md
deleted file mode 100644
index cc22676..0000000
--- a/vendor/src/github.com/matrix-org/dugong/README.md
+++ /dev/null
@@ -1,10 +0,0 @@
-# dugong
-Logging utilities for [logrus](https://github.com/Sirupsen/logrus).
-
-To develop on this library, you need logrus on your GOPATH:
-
- ``go get github.com/Sirupsen/logrus``
-
-You can then run its tests by running
-
- ``go test``
diff --git a/vendor/src/github.com/matrix-org/dugong/fshook.go b/vendor/src/github.com/matrix-org/dugong/fshook.go
deleted file mode 100644
index be539ab..0000000
--- a/vendor/src/github.com/matrix-org/dugong/fshook.go
+++ /dev/null
@@ -1,210 +0,0 @@
-package dugong
-
-import (
- "compress/gzip"
- "fmt"
- log "github.com/Sirupsen/logrus"
- "io"
- "os"
- "path/filepath"
- "sync/atomic"
- "time"
-)
-
-// RotationScheduler determines when files should be rotated.
-type RotationScheduler interface {
- // ShouldRotate returns true if the file should be rotated. The suffix to apply
- // to the filename is returned as the 2nd arg.
- ShouldRotate() (bool, string)
- // ShouldGZip returns true if the file should be gzipped when it is rotated.
- ShouldGZip() bool
-}
-
-// DailyRotationSchedule rotates log files daily. Logs are only rotated
-// when midnight passes *whilst the process is running*. E.g: if you run
-// the process on Day 4 then stop it and start it on Day 7, no rotation will
-// occur when the process starts.
-type DailyRotationSchedule struct {
- GZip bool
- rotateAfter *time.Time
-}
-
-var currentTime = time.Now // exclusively for testing
-
-func dayOffset(t time.Time, offsetDays int) time.Time {
- // GoDoc:
- // The month, day, hour, min, sec, and nsec values may be outside their
- // usual ranges and will be normalized during the conversion.
- // For example, October 32 converts to November 1.
- return time.Date(
- t.Year(), t.Month(), t.Day()+offsetDays, 0, 0, 0, 0, t.Location(),
- )
-}
-
-func (rs *DailyRotationSchedule) ShouldRotate() (bool, string) {
- now := currentTime()
- if rs.rotateAfter == nil {
- nextRotate := dayOffset(now, 1)
- rs.rotateAfter = &nextRotate
- return false, ""
- }
- if now.After(*rs.rotateAfter) {
- // the suffix should be actually the date of the complete day being logged
- actualDay := dayOffset(*rs.rotateAfter, -1)
- suffix := "." + actualDay.Format("2006-01-02") // YYYY-MM-DD
- nextRotate := dayOffset(now, 1)
- rs.rotateAfter = &nextRotate
- return true, suffix
- }
- return false, ""
-}
-
-func (rs *DailyRotationSchedule) ShouldGZip() bool {
- return rs.GZip
-}
-
-// NewFSHook makes a logging hook that writes formatted
-// log entries to info, warn and error log files. Each log file
-// contains the messages with that severity or higher. If a formatter is
-// not specified, they will be logged using a JSON formatter. If a
-// RotationScheduler is set, the files will be cycled according to its rules.
-func NewFSHook(infoPath, warnPath, errorPath string, formatter log.Formatter, rotSched RotationScheduler) log.Hook {
- if formatter == nil {
- formatter = &log.JSONFormatter{}
- }
- hook := &fsHook{
- entries: make(chan log.Entry, 1024),
- infoPath: infoPath,
- warnPath: warnPath,
- errorPath: errorPath,
- formatter: formatter,
- scheduler: rotSched,
- }
-
- go func() {
- for entry := range hook.entries {
- if err := hook.writeEntry(&entry); err != nil {
- fmt.Fprintf(os.Stderr, "Error writing to logfile: %v\n", err)
- }
- atomic.AddInt32(&hook.queueSize, -1)
- }
- }()
-
- return hook
-}
-
-type fsHook struct {
- entries chan log.Entry
- queueSize int32
- infoPath string
- warnPath string
- errorPath string
- formatter log.Formatter
- scheduler RotationScheduler
-}
-
-func (hook *fsHook) Fire(entry *log.Entry) error {
- atomic.AddInt32(&hook.queueSize, 1)
- hook.entries <- *entry
- return nil
-}
-
-func (hook *fsHook) writeEntry(entry *log.Entry) error {
- msg, err := hook.formatter.Format(entry)
- if err != nil {
- return nil
- }
-
- if hook.scheduler != nil {
- if should, suffix := hook.scheduler.ShouldRotate(); should {
- if err := hook.rotate(suffix, hook.scheduler.ShouldGZip()); err != nil {
- return err
- }
- }
- }
-
- if entry.Level <= log.ErrorLevel {
- if err := logToFile(hook.errorPath, msg); err != nil {
- return err
- }
- }
-
- if entry.Level <= log.WarnLevel {
- if err := logToFile(hook.warnPath, msg); err != nil {
- return err
- }
- }
-
- if entry.Level <= log.InfoLevel {
- if err := logToFile(hook.infoPath, msg); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (hook *fsHook) Levels() []log.Level {
- return []log.Level{
- log.PanicLevel,
- log.FatalLevel,
- log.ErrorLevel,
- log.WarnLevel,
- log.InfoLevel,
- }
-}
-
-// rotate all the log files to the given suffix.
-// If error path is "err.log" and suffix is "1" then move
-// the contents to "err.log1".
-// This requires no locking as the goroutine calling this is the same
-// one which does the logging. Since we don't hold open a handle to the
-// file when writing, a simple Rename is all that is required.
-func (hook *fsHook) rotate(suffix string, gzip bool) error {
- for _, fpath := range []string{hook.errorPath, hook.warnPath, hook.infoPath} {
- logFilePath := fpath + suffix
- if err := os.Rename(fpath, logFilePath); err != nil {
- // e.g. because there were no errors in error.log for this day
- fmt.Fprintf(os.Stderr, "Error rotating file %s: %v\n", fpath, err)
- continue // don't try to gzip if we failed to rotate
- }
- if gzip {
- if err := gzipFile(logFilePath); err != nil {
- fmt.Fprintf(os.Stderr, "Failed to gzip file %s: %v\n", logFilePath, err)
- }
- }
- }
- return nil
-}
-
-func logToFile(path string, msg []byte) error {
- fd, err := os.OpenFile(path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0600)
- if err != nil {
- return err
- }
- defer fd.Close()
- _, err = fd.Write(msg)
- return err
-}
-
-func gzipFile(fpath string) error {
- reader, err := os.Open(fpath)
- if err != nil {
- return err
- }
-
- filename := filepath.Base(fpath)
- target := filepath.Join(filepath.Dir(fpath), filename+".gz")
- writer, err := os.Create(target)
- if err != nil {
- return err
- }
- defer writer.Close()
-
- archiver := gzip.NewWriter(writer)
- archiver.Name = filename
- defer archiver.Close()
-
- _, err = io.Copy(archiver, reader)
- return err
-}
diff --git a/vendor/src/github.com/matrix-org/dugong/fshook_test.go b/vendor/src/github.com/matrix-org/dugong/fshook_test.go
deleted file mode 100644
index a51a51f..0000000
--- a/vendor/src/github.com/matrix-org/dugong/fshook_test.go
+++ /dev/null
@@ -1,314 +0,0 @@
-package dugong
-
-import (
- "bufio"
- "encoding/json"
- log "github.com/Sirupsen/logrus"
- "io/ioutil"
- "os"
- "path/filepath"
- "runtime"
- "sync"
- "sync/atomic"
- "testing"
- "time"
-)
-
-const (
- fieldName = "my_field"
- fieldValue = "my_value"
-)
-
-func TestFSHookInfo(t *testing.T) {
- logger, hook, wait, teardown := setupLogHook(t)
- defer teardown()
-
- logger.WithField(fieldName, fieldValue).Info("Info message")
-
- wait()
-
- checkLogFile(t, hook.infoPath, "info")
-}
-
-func TestFSHookWarn(t *testing.T) {
- logger, hook, wait, teardown := setupLogHook(t)
- defer teardown()
-
- logger.WithField(fieldName, fieldValue).Warn("Warn message")
-
- wait()
-
- checkLogFile(t, hook.infoPath, "warning")
- checkLogFile(t, hook.warnPath, "warning")
-}
-
-func TestFSHookError(t *testing.T) {
- logger, hook, wait, teardown := setupLogHook(t)
- defer teardown()
-
- logger.WithField(fieldName, fieldValue).Error("Error message")
-
- wait()
-
- checkLogFile(t, hook.infoPath, "error")
- checkLogFile(t, hook.warnPath, "error")
- checkLogFile(t, hook.errorPath, "error")
-}
-
-func TestFsHookInterleaved(t *testing.T) {
- logger, hook, wait, teardown := setupLogHook(t)
- defer teardown()
-
- logger.WithField("counter", 0).Info("message")
- logger.WithField("counter", 1).Warn("message")
- logger.WithField("counter", 2).Error("message")
- logger.WithField("counter", 3).Warn("message")
- logger.WithField("counter", 4).Info("message")
-
- wait()
-
- file, err := os.Open(hook.infoPath)
- if err != nil {
- t.Fatalf("Failed to open file: %v", err)
- }
-
- scanner := bufio.NewScanner(file)
- count := 0
- for scanner.Scan() {
- data := make(map[string]interface{})
- if err := json.Unmarshal([]byte(scanner.Text()), &data); err != nil {
- t.Fatalf("Failed to parse JSON: %v", err)
- }
- dataCounter := int(data["counter"].(float64))
- if count != dataCounter {
- t.Fatalf("Counter: want %d got %d", count, dataCounter)
- }
- count++
- }
-
- if count != 5 {
- t.Fatalf("Lines: want 5 got %d", count)
- }
-}
-
-func TestFSHookMultiple(t *testing.T) {
- logger, hook, wait, teardown := setupLogHook(t)
- defer teardown()
-
- for i := 0; i < 100; i++ {
- logger.WithField("counter", i).Info("message")
- }
-
- wait()
-
- file, err := os.Open(hook.infoPath)
- if err != nil {
- t.Fatalf("Failed to open file: %v", err)
- }
-
- scanner := bufio.NewScanner(file)
- count := 0
- for scanner.Scan() {
- data := make(map[string]interface{})
- if err := json.Unmarshal([]byte(scanner.Text()), &data); err != nil {
- t.Fatalf("Failed to parse JSON: %v", err)
- }
- dataCounter := int(data["counter"].(float64))
- if count != dataCounter {
- t.Fatalf("Counter: want %d got %d", count, dataCounter)
- }
- count++
- }
-
- if count != 100 {
- t.Fatalf("Lines: want 100 got %d", count)
- }
-}
-
-func TestFSHookConcurrent(t *testing.T) {
- logger, hook, wait, teardown := setupLogHook(t)
- defer teardown()
-
- var wg sync.WaitGroup
-
- for i := 0; i < 100; i++ {
- wg.Add(1)
-
- go func(counter int) {
- defer wg.Done()
- logger.WithField("counter", counter).Info("message")
- }(i)
- }
-
- wg.Wait()
- wait()
-
- file, err := os.Open(hook.infoPath)
- if err != nil {
- t.Fatalf("Failed to open file: %v", err)
- }
-
- scanner := bufio.NewScanner(file)
- count := 0
- for scanner.Scan() {
- data := make(map[string]interface{})
- if err := json.Unmarshal([]byte(scanner.Text()), &data); err != nil {
- t.Fatalf("Failed to parse JSON: %v", err)
- }
- count++
- }
-
- if count != 100 {
- t.Fatalf("Lines: want 100 got %d", count)
- }
-}
-
-func TestDailySchedule(t *testing.T) {
- loc, err := time.LoadLocation("UTC")
- if err != nil {
- t.Fatalf("Failed to load location UTC: %s", err)
- }
-
- logger, hook, wait, teardown := setupLogHook(t)
- defer teardown()
- hook.scheduler = &DailyRotationSchedule{}
-
- // Time ticks from 23:50 to 00:10 in 1 minute increments. Log each tick as 'counter'.
- minutesGoneBy := 0
- currentTime = func() time.Time {
- minutesGoneBy += 1
- return time.Date(2016, 10, 26, 23, 50+minutesGoneBy, 00, 0, loc)
- }
- for i := 0; i < 20; i++ {
- t := time.Date(2016, 10, 26, 23, 50+i, 00, 0, loc)
- logger.WithField("counter", i).Info("BASE " + t.Format(time.ANSIC))
- }
-
- wait()
-
- // info.log.2016-10-26 should have 0 -> 9
- checkFileHasSequentialCounts(t, hook.infoPath+".2016-10-26", 0, 9)
-
- // info.log should have 10 -> 19 inclusive
- checkFileHasSequentialCounts(t, hook.infoPath, 10, 19)
-}
-
-func TestDailyScheduleMultipleRotations(t *testing.T) {
- loc, err := time.LoadLocation("UTC")
- if err != nil {
- t.Fatalf("Failed to load location UTC: %s", err)
- }
-
- logger, hook, wait, teardown := setupLogHook(t)
- defer teardown()
- hook.scheduler = &DailyRotationSchedule{}
-
- // Time ticks every 12 hours from 13:37 -> 01:37 -> 13:37 -> ...
- hoursGoneBy := 0
- currentTime = func() time.Time {
- hoursGoneBy += 12
- // Start from 10/29 01:37
- return time.Date(2016, 10, 28, 13+hoursGoneBy, 37, 00, 0, loc)
- }
- // log 2 lines per file, to 4 files (so 8 log lines)
- for i := 0; i < 8; i++ {
- ts := time.Date(2016, 10, 28, 13+((i+1)*12), 37, 00, 0, loc)
- logger.WithField("counter", i).Infof("The time is now %s", ts)
- }
-
- wait()
-
- // info.log.2016-10-29 should have 0-1
- checkFileHasSequentialCounts(t, hook.infoPath+".2016-10-29", 0, 1)
-
- // info.log.2016-10-30 should have 2-3
- checkFileHasSequentialCounts(t, hook.infoPath+".2016-10-30", 2, 3)
-
- // info.log.2016-10-31 should have 4-5
- checkFileHasSequentialCounts(t, hook.infoPath+".2016-10-31", 4, 5)
-
- // info.log should have 6-7 (current day is 11/01)
- checkFileHasSequentialCounts(t, hook.infoPath, 6, 7)
-}
-
-// checkFileHasSequentialCounts based on a JSON "counter" key being a monotonically
-// incrementing integer. from and to are both inclusive.
-func checkFileHasSequentialCounts(t *testing.T, filepath string, from, to int) {
- t.Logf("checkFileHasSequentialCounts(%s,%d,%d)", filepath, from, to)
-
- file, err := os.Open(filepath)
- if err != nil {
- t.Fatalf("Failed to open file: %v", err)
- return
- }
-
- defer file.Close()
- scanner := bufio.NewScanner(file)
- count := from
- for scanner.Scan() {
- data := make(map[string]interface{})
- if err := json.Unmarshal([]byte(scanner.Text()), &data); err != nil {
- t.Fatalf("%s : Failed to parse JSON: %v", file.Name(), err)
- }
- dataCounter := int(data["counter"].(float64))
- t.Logf("%s want %d got %d", file.Name(), count, dataCounter)
- if count != dataCounter {
- t.Fatalf("%s : Counter: want %d got %d", file.Name(), count, dataCounter)
- }
-
- count++
- }
- count-- // never hit the next value
-
- if count != to {
- t.Fatalf("%s EOF: Want count %d got %d", file.Name(), to, count)
- }
-}
-
-func setupLogHook(t *testing.T) (logger *log.Logger, hook *fsHook, wait func(), teardown func()) {
- dir, err := ioutil.TempDir("", "TestFSHook")
- if err != nil {
- t.Fatalf("Failed to make temporary directory: %v", err)
- }
-
- infoPath := filepath.Join(dir, "info.log")
- warnPath := filepath.Join(dir, "warn.log")
- errorPath := filepath.Join(dir, "error.log")
-
- hook = NewFSHook(infoPath, warnPath, errorPath, nil, nil).(*fsHook)
-
- logger = log.New()
- logger.Hooks.Add(hook)
-
- wait = func() {
- for atomic.LoadInt32(&hook.queueSize) != 0 {
- runtime.Gosched()
- }
- }
-
- teardown = func() {
- os.RemoveAll(dir)
- }
-
- return
-}
-
-func checkLogFile(t *testing.T, path, expectedLevel string) {
- contents, err := ioutil.ReadFile(path)
- if err != nil {
- t.Fatalf("Failed to read file: %v", err)
- }
-
- data := make(map[string]interface{})
- if err := json.Unmarshal(contents, &data); err != nil {
- t.Fatalf("Failed to parse JSON: %v", err)
- }
-
- if data["level"] != expectedLevel {
- t.Fatalf("level: want %q got %q", expectedLevel, data["level"])
- }
-
- if data[fieldName] != fieldValue {
- t.Fatalf("%s: want %q got %q", fieldName, fieldValue, data[fieldName])
- }
-}
diff --git a/vendor/src/github.com/matrix-org/gomatrix/LICENSE b/vendor/src/github.com/matrix-org/gomatrix/LICENSE
deleted file mode 100644
index 8dada3e..0000000
--- a/vendor/src/github.com/matrix-org/gomatrix/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "{}"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright {yyyy} {name of copyright owner}
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/src/github.com/matrix-org/gomatrix/README.md b/vendor/src/github.com/matrix-org/gomatrix/README.md
deleted file mode 100644
index 4f6df02..0000000
--- a/vendor/src/github.com/matrix-org/gomatrix/README.md
+++ /dev/null
@@ -1,4 +0,0 @@
-# gomatrix
-[![GoDoc](https://godoc.org/github.com/matrix-org/gomatrix?status.svg)](https://godoc.org/github.com/matrix-org/gomatrix)
-
-A Golang Matrix client
diff --git a/vendor/src/github.com/matrix-org/gomatrix/client.go b/vendor/src/github.com/matrix-org/gomatrix/client.go
deleted file mode 100644
index d061e3c..0000000
--- a/vendor/src/github.com/matrix-org/gomatrix/client.go
+++ /dev/null
@@ -1,381 +0,0 @@
-// Package gomatrix implements the Matrix Client-Server API.
-//
-// Specification can be found at http://matrix.org/docs/spec/client_server/r0.2.0.html
-//
-// Example usage of this library: (blocking version)
-// cli, _ := gomatrix.NewClient("https://matrix.org", "@example:matrix.org", "MDAefhiuwehfuiwe")
-// syncer := cli.Syncer.(*gomatrix.DefaultSyncer)
-// syncer.OnEventType("m.room.message", func(ev *gomatrix.Event) {
-// fmt.Println("Message: ", ev)
-// })
-// if err := cli.Sync(); err != nil {
-// fmt.Println("Sync() returned ", err)
-// }
-//
-// To make the example non-blocking, call Sync() in a goroutine.
-package gomatrix
-
-import (
- "bytes"
- "encoding/json"
- "fmt"
- "io"
- "io/ioutil"
- "net/http"
- "net/url"
- "path"
- "strconv"
- "sync"
- "time"
-)
-
-// Client represents a Matrix client.
-type Client struct {
- HomeserverURL *url.URL // The base homeserver URL
- Prefix string // The API prefix eg '/_matrix/client/r0'
- UserID string // The user ID of the client. Used for forming HTTP paths which use the client's user ID.
- AccessToken string // The access_token for the client.
- syncingMutex sync.Mutex // protects syncingID
- syncingID uint32 // Identifies the current Sync. Only one Sync can be active at any given time.
- Client *http.Client // The underlying HTTP client which will be used to make HTTP requests.
- Syncer Syncer // The thing which can process /sync responses
- Store Storer // The thing which can store rooms/tokens/ids
-}
-
-// HTTPError An HTTP Error response, which may wrap an underlying native Go Error.
-type HTTPError struct {
- WrappedError error
- Message string
- Code int
-}
-
-func (e HTTPError) Error() string {
- var wrappedErrMsg string
- if e.WrappedError != nil {
- wrappedErrMsg = e.WrappedError.Error()
- }
- return fmt.Sprintf("msg=%s code=%d wrapped=%s", e.Message, e.Code, wrappedErrMsg)
-}
-
-// BuildURL builds a URL with the Client's homserver/prefix/access_token set already.
-func (cli *Client) BuildURL(urlPath ...string) string {
- ps := []string{cli.Prefix}
- for _, p := range urlPath {
- ps = append(ps, p)
- }
- return cli.BuildBaseURL(ps...)
-}
-
-// BuildBaseURL builds a URL with the Client's homeserver/access_token set already. You must
-// supply the prefix in the path.
-func (cli *Client) BuildBaseURL(urlPath ...string) string {
- // copy the URL. Purposefully ignore error as the input is from a valid URL already
- hsURL, _ := url.Parse(cli.HomeserverURL.String())
- parts := []string{hsURL.Path}
- parts = append(parts, urlPath...)
- hsURL.Path = path.Join(parts...)
- query := hsURL.Query()
- query.Set("access_token", cli.AccessToken)
- hsURL.RawQuery = query.Encode()
- return hsURL.String()
-}
-
-// BuildURLWithQuery builds a URL with query paramters in addition to the Client's homeserver/prefix/access_token set already.
-func (cli *Client) BuildURLWithQuery(urlPath []string, urlQuery map[string]string) string {
- u, _ := url.Parse(cli.BuildURL(urlPath...))
- q := u.Query()
- for k, v := range urlQuery {
- q.Set(k, v)
- }
- u.RawQuery = q.Encode()
- return u.String()
-}
-
-// Sync starts syncing with the provided Homeserver. This function will block until a fatal /sync error occurs, so should
-// almost always be started as a new goroutine. If Sync() is called twice then the first sync will be stopped.
-func (cli *Client) Sync() error {
- // Mark the client as syncing.
- // We will keep syncing until the syncing state changes. Either because
- // Sync is called or StopSync is called.
- syncingID := cli.incrementSyncingID()
- nextBatch := cli.Store.LoadNextBatch(cli.UserID)
- filterID := cli.Store.LoadFilterID(cli.UserID)
- if filterID == "" {
- filterJSON := cli.Syncer.GetFilterJSON(cli.UserID)
- resFilter, err := cli.CreateFilter(filterJSON)
- if err != nil {
- return err
- }
- filterID = resFilter.FilterID
- cli.Store.SaveFilterID(cli.UserID, filterID)
- }
-
- for {
- resSync, err := cli.SyncRequest(30000, nextBatch, filterID, false, "")
- if err != nil {
- duration, err2 := cli.Syncer.OnFailedSync(resSync, err)
- if err2 != nil {
- return err2
- }
- time.Sleep(duration)
- continue
- }
-
- // Check that the syncing state hasn't changed
- // Either because we've stopped syncing or another sync has been started.
- // We discard the response from our sync.
- if cli.getSyncingID() != syncingID {
- return nil
- }
-
- // Save the token now *before* processing it. This means it's possible
- // to not process some events, but it means that we won't get constantly stuck processing
- // a malformed/buggy event which keeps making us panic.
- cli.Store.SaveNextBatch(cli.UserID, resSync.NextBatch)
- if err = cli.Syncer.ProcessResponse(resSync, nextBatch); err != nil {
- return err
- }
-
- nextBatch = resSync.NextBatch
- }
-}
-
-func (cli *Client) incrementSyncingID() uint32 {
- cli.syncingMutex.Lock()
- defer cli.syncingMutex.Unlock()
- cli.syncingID++
- return cli.syncingID
-}
-
-func (cli *Client) getSyncingID() uint32 {
- cli.syncingMutex.Lock()
- defer cli.syncingMutex.Unlock()
- return cli.syncingID
-}
-
-// StopSync stops the ongoing sync started by Sync.
-func (cli *Client) StopSync() {
- // Advance the syncing state so that any running Syncs will terminate.
- cli.incrementSyncingID()
-}
-
-// SendJSON sends JSON to the given URL.
-//
-// Returns the HTTP body as bytes on 2xx. Returns an error if the response is not 2xx. This error
-// is an HTTPError which includes the returned HTTP status code and possibly a RespError as the
-// WrappedError, if the HTTP body could be decoded as a RespError.
-func (cli *Client) SendJSON(method string, httpURL string, contentJSON interface{}) ([]byte, error) {
- jsonStr, err := json.Marshal(contentJSON)
- if err != nil {
- return nil, err
- }
- req, err := http.NewRequest(method, httpURL, bytes.NewBuffer(jsonStr))
- if err != nil {
- return nil, err
- }
- req.Header.Set("Content-Type", "application/json")
- res, err := cli.Client.Do(req)
- if res != nil {
- defer res.Body.Close()
- }
- if err != nil {
- return nil, err
- }
- contents, err := ioutil.ReadAll(res.Body)
- if res.StatusCode >= 300 || res.StatusCode < 200 {
- var wrap error
- var respErr RespError
- if _ = json.Unmarshal(contents, respErr); respErr.ErrCode != "" {
- wrap = respErr
- }
-
- // If we failed to decode as RespError, don't just drop the HTTP body, include it in the
- // HTTP error instead (e.g proxy errors which return HTML).
- msg := "Failed to " + method + " JSON"
- if wrap == nil {
- msg = msg + ": " + string(contents)
- }
-
- return nil, HTTPError{
- Code: res.StatusCode,
- Message: msg,
- WrappedError: wrap,
- }
- }
- if err != nil {
- return nil, err
- }
- return contents, nil
-}
-
-// CreateFilter makes an HTTP request according to http://matrix.org/docs/spec/client_server/r0.2.0.html#post-matrix-client-r0-user-userid-filter
-func (cli *Client) CreateFilter(filter json.RawMessage) (*RespCreateFilter, error) {
- urlPath := cli.BuildURL("user", cli.UserID, "filter")
- resBytes, err := cli.SendJSON("POST", urlPath, &filter)
- if err != nil {
- return nil, err
- }
- var filterResponse RespCreateFilter
- if err = json.Unmarshal(resBytes, &filterResponse); err != nil {
- return nil, err
- }
- return &filterResponse, nil
-}
-
-// SyncRequest makes an HTTP request according to http://matrix.org/docs/spec/client_server/r0.2.0.html#get-matrix-client-r0-sync
-func (cli *Client) SyncRequest(timeout int, since, filterID string, fullState bool, setPresence string) (*RespSync, error) {
- query := map[string]string{
- "timeout": strconv.Itoa(timeout),
- }
- if since != "" {
- query["since"] = since
- }
- if filterID != "" {
- query["filter"] = filterID
- }
- if setPresence != "" {
- query["set_presence"] = setPresence
- }
- if fullState {
- query["full_state"] = "true"
- }
- urlPath := cli.BuildURLWithQuery([]string{"sync"}, query)
- req, err := http.NewRequest("GET", urlPath, nil)
- if err != nil {
- return nil, err
- }
- res, err := cli.Client.Do(req)
- if res != nil {
- defer res.Body.Close()
- }
- if err != nil {
- return nil, err
- }
-
- var syncResponse RespSync
- err = json.NewDecoder(res.Body).Decode(&syncResponse)
- return &syncResponse, err
-}
-
-// JoinRoom joins the client to a room ID or alias. See http://matrix.org/docs/spec/client_server/r0.2.0.html#post-matrix-client-r0-join-roomidoralias
-//
-// If serverName is specified, this will be added as a query param to instruct the homeserver to join via that server. If content is specified, it will
-// be JSON encoded and used as the request body.
-func (cli *Client) JoinRoom(roomIDorAlias, serverName string, content interface{}) (*RespJoinRoom, error) {
- var urlPath string
- if serverName != "" {
- urlPath = cli.BuildURLWithQuery([]string{"join", roomIDorAlias}, map[string]string{
- "server_name": serverName,
- })
- } else {
- urlPath = cli.BuildURL("join", roomIDorAlias)
- }
-
- resBytes, err := cli.SendJSON("POST", urlPath, content)
- if err != nil {
- return nil, err
- }
- var joinRoomResponse RespJoinRoom
- if err = json.Unmarshal(resBytes, &joinRoomResponse); err != nil {
- return nil, err
- }
- return &joinRoomResponse, nil
-}
-
-// SetDisplayName sets the user's profile display name. See http://matrix.org/docs/spec/client_server/r0.2.0.html#put-matrix-client-r0-profile-userid-displayname
-func (cli *Client) SetDisplayName(displayName string) error {
- urlPath := cli.BuildURL("profile", cli.UserID, "displayname")
- s := struct {
- DisplayName string `json:"displayname"`
- }{displayName}
- _, err := cli.SendJSON("PUT", urlPath, &s)
- return err
-}
-
-// SendMessageEvent sends a message event into a room. See http://matrix.org/docs/spec/client_server/r0.2.0.html#put-matrix-client-r0-rooms-roomid-send-eventtype-txnid
-// contentJSON should be a pointer to something that can be encoded as JSON using json.Marshal.
-func (cli *Client) SendMessageEvent(roomID string, eventType string, contentJSON interface{}) (*RespSendEvent, error) {
- txnID := "go" + strconv.FormatInt(time.Now().UnixNano(), 10)
- urlPath := cli.BuildURL("rooms", roomID, "send", eventType, txnID)
- resBytes, err := cli.SendJSON("PUT", urlPath, contentJSON)
- if err != nil {
- return nil, err
- }
- var sendEventResponse RespSendEvent
- if err = json.Unmarshal(resBytes, &sendEventResponse); err != nil {
- return nil, err
- }
- return &sendEventResponse, nil
-}
-
-// SendText sends an m.room.message event into the given room with a msgtype of m.text
-// See http://matrix.org/docs/spec/client_server/r0.2.0.html#m-text
-func (cli *Client) SendText(roomID, text string) (*RespSendEvent, error) {
- return cli.SendMessageEvent(roomID, "m.room.message",
- TextMessage{"m.text", text})
-}
-
-// UploadLink uploads an HTTP URL and then returns an MXC URI.
-func (cli *Client) UploadLink(link string) (*RespMediaUpload, error) {
- res, err := cli.Client.Get(link)
- if res != nil {
- defer res.Body.Close()
- }
- if err != nil {
- return nil, err
- }
- return cli.UploadToContentRepo(res.Body, res.Header.Get("Content-Type"), res.ContentLength)
-}
-
-// UploadToContentRepo uploads the given bytes to the content repository and returns an MXC URI.
-// See http://matrix.org/docs/spec/client_server/r0.2.0.html#post-matrix-media-r0-upload
-func (cli *Client) UploadToContentRepo(content io.Reader, contentType string, contentLength int64) (*RespMediaUpload, error) {
- req, err := http.NewRequest("POST", cli.BuildBaseURL("_matrix/media/r0/upload"), content)
- if err != nil {
- return nil, err
- }
- req.Header.Set("Content-Type", contentType)
- req.ContentLength = contentLength
- res, err := cli.Client.Do(req)
- if res != nil {
- defer res.Body.Close()
- }
- if err != nil {
- return nil, err
- }
- if res.StatusCode != 200 {
- return nil, HTTPError{
- Message: "Upload request failed",
- Code: res.StatusCode,
- }
- }
- var m RespMediaUpload
- if err := json.NewDecoder(res.Body).Decode(&m); err != nil {
- return nil, err
- }
- return &m, nil
-}
-
-// NewClient creates a new Matrix Client ready for syncing
-func NewClient(homeserverURL, userID, accessToken string) (*Client, error) {
- hsURL, err := url.Parse(homeserverURL)
- if err != nil {
- return nil, err
- }
- // By default, use an in-memory store which will never save filter ids / next batch tokens to disk.
- // The client will work with this storer: it just won't remember across restarts.
- // In practice, a database backend should be used.
- store := NewInMemoryStore()
- cli := Client{
- AccessToken: accessToken,
- HomeserverURL: hsURL,
- UserID: userID,
- Prefix: "/_matrix/client/r0",
- Syncer: NewDefaultSyncer(userID, store),
- Store: store,
- }
- // By default, use the default HTTP client.
- cli.Client = http.DefaultClient
-
- return &cli, nil
-}
diff --git a/vendor/src/github.com/matrix-org/gomatrix/client_test.go b/vendor/src/github.com/matrix-org/gomatrix/client_test.go
deleted file mode 100644
index 6a12a9a..0000000
--- a/vendor/src/github.com/matrix-org/gomatrix/client_test.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package gomatrix
-
-import "fmt"
-
-func ExampleClient_BuildURLWithQuery() {
- cli, _ := NewClient("https://matrix.org", "@example:matrix.org", "abcdef123456")
- out := cli.BuildURLWithQuery([]string{"sync"}, map[string]string{
- "filter_id": "5",
- })
- fmt.Println(out)
- // Output: https://matrix.org/_matrix/client/r0/sync?access_token=abcdef123456&filter_id=5
-}
-
-func ExampleClient_BuildURL() {
- userID := "@example:matrix.org"
- cli, _ := NewClient("https://matrix.org", userID, "abcdef123456")
- out := cli.BuildURL("user", userID, "filter")
- fmt.Println(out)
- // Output: https://matrix.org/_matrix/client/r0/user/@example:matrix.org/filter?access_token=abcdef123456
-}
-
-func ExampleClient_BuildBaseURL() {
- userID := "@example:matrix.org"
- cli, _ := NewClient("https://matrix.org", userID, "abcdef123456")
- out := cli.BuildBaseURL("_matrix", "client", "r0", "directory", "room", "#matrix:matrix.org")
- fmt.Println(out)
- // Output: https://matrix.org/_matrix/client/r0/directory/room/%23matrix:matrix.org?access_token=abcdef123456
-}
diff --git a/vendor/src/github.com/matrix-org/gomatrix/events.go b/vendor/src/github.com/matrix-org/gomatrix/events.go
deleted file mode 100644
index 6ea259e..0000000
--- a/vendor/src/github.com/matrix-org/gomatrix/events.go
+++ /dev/null
@@ -1,82 +0,0 @@
-package gomatrix
-
-import (
- "html"
- "regexp"
-)
-
-// Event represents a single Matrix event.
-type Event struct {
- StateKey string `json:"state_key"` // The state key for the event. Only present on State Events.
- Sender string `json:"sender"` // The user ID of the sender of the event
- Type string `json:"type"` // The event type
- Timestamp int `json:"origin_server_ts"` // The unix timestamp when this message was sent by the origin server
- ID string `json:"event_id"` // The unique ID of this event
- RoomID string `json:"room_id"` // The room the event was sent to. May be nil (e.g. for presence)
- Content map[string]interface{} `json:"content"` // The JSON content of the event.
-}
-
-// Body returns the value of the "body" key in the event content if it is
-// present and is a string.
-func (event *Event) Body() (body string, ok bool) {
- value, exists := event.Content["body"]
- if !exists {
- return
- }
- body, ok = value.(string)
- return
-}
-
-// MessageType returns the value of the "msgtype" key in the event content if
-// it is present and is a string.
-func (event *Event) MessageType() (msgtype string, ok bool) {
- value, exists := event.Content["msgtype"]
- if !exists {
- return
- }
- msgtype, ok = value.(string)
- return
-}
-
-// TextMessage is the contents of a Matrix formated message event.
-type TextMessage struct {
- MsgType string `json:"msgtype"`
- Body string `json:"body"`
-}
-
-// ImageInfo contains info about an image
-type ImageInfo struct {
- Height uint `json:"h"`
- Width uint `json:"w"`
- Mimetype string `json:"mimetype"`
- Size uint `json:"size"`
-}
-
-// ImageMessage is an m.image event
-type ImageMessage struct {
- MsgType string `json:"msgtype"`
- Body string `json:"body"`
- URL string `json:"url"`
- Info ImageInfo `json:"info"`
-}
-
-// An HTMLMessage is the contents of a Matrix HTML formated message event.
-type HTMLMessage struct {
- Body string `json:"body"`
- MsgType string `json:"msgtype"`
- Format string `json:"format"`
- FormattedBody string `json:"formatted_body"`
-}
-
-var htmlRegex = regexp.MustCompile("<[^<]+?>")
-
-// GetHTMLMessage returns an HTMLMessage with the body set to a stripped version of the provided HTML, in addition
-// to the provided HTML.
-func GetHTMLMessage(msgtype, htmlText string) HTMLMessage {
- return HTMLMessage{
- Body: html.UnescapeString(htmlRegex.ReplaceAllLiteralString(htmlText, "")),
- MsgType: msgtype,
- Format: "org.matrix.custom.html",
- FormattedBody: htmlText,
- }
-}
diff --git a/vendor/src/github.com/matrix-org/gomatrix/hooks/install.sh b/vendor/src/github.com/matrix-org/gomatrix/hooks/install.sh
deleted file mode 100644
index f8aa331..0000000
--- a/vendor/src/github.com/matrix-org/gomatrix/hooks/install.sh
+++ /dev/null
@@ -1,5 +0,0 @@
-#! /bin/bash
-
-DOT_GIT="$(dirname $0)/../.git"
-
-ln -s "../../hooks/pre-commit" "$DOT_GIT/hooks/pre-commit"
\ No newline at end of file
diff --git a/vendor/src/github.com/matrix-org/gomatrix/hooks/pre-commit b/vendor/src/github.com/matrix-org/gomatrix/hooks/pre-commit
deleted file mode 100644
index 6a14ccf..0000000
--- a/vendor/src/github.com/matrix-org/gomatrix/hooks/pre-commit
+++ /dev/null
@@ -1,9 +0,0 @@
-#! /bin/bash
-
-set -eu
-
-golint
-go fmt
-go tool vet --shadow .
-gocyclo -over 12 .
-go test -timeout 5s -test.v
diff --git a/vendor/src/github.com/matrix-org/gomatrix/responses.go b/vendor/src/github.com/matrix-org/gomatrix/responses.go
deleted file mode 100644
index 76bfbe2..0000000
--- a/vendor/src/github.com/matrix-org/gomatrix/responses.go
+++ /dev/null
@@ -1,61 +0,0 @@
-package gomatrix
-
-// RespError is the standard JSON error response from Homeservers. It also implements the Golang "error" interface.
-// See http://matrix.org/docs/spec/client_server/r0.2.0.html#api-standards
-type RespError struct {
- ErrCode string `json:"errcode"`
- Err string `json:"error"`
-}
-
-// Error returns the errcode and error message.
-func (e RespError) Error() string {
- return e.ErrCode + ": " + e.Err
-}
-
-// RespCreateFilter is the JSON response for http://matrix.org/docs/spec/client_server/r0.2.0.html#post-matrix-client-r0-user-userid-filter
-type RespCreateFilter struct {
- FilterID string `json:"filter_id"`
-}
-
-// RespJoinRoom is the JSON response for http://matrix.org/docs/spec/client_server/r0.2.0.html#post-matrix-client-r0-rooms-roomid-join
-type RespJoinRoom struct {
- RoomID string `json:"room_id"`
-}
-
-// RespSendEvent is the JSON response for http://matrix.org/docs/spec/client_server/r0.2.0.html#put-matrix-client-r0-rooms-roomid-send-eventtype-txnid
-type RespSendEvent struct {
- EventID string `json:"event_id"`
-}
-
-// RespMediaUpload is the JSON response for http://matrix.org/docs/spec/client_server/r0.2.0.html#post-matrix-media-r0-upload
-type RespMediaUpload struct {
- ContentURI string `json:"content_uri"`
-}
-
-// RespSync is the JSON response for http://matrix.org/docs/spec/client_server/r0.2.0.html#get-matrix-client-r0-sync
-type RespSync struct {
- NextBatch string `json:"next_batch"`
- AccountData struct {
- Events []Event `json:"events"`
- } `json:"account_data"`
- Presence struct {
- Events []Event `json:"events"`
- } `json:"presence"`
- Rooms struct {
- Join map[string]struct {
- State struct {
- Events []Event `json:"events"`
- } `json:"state"`
- Timeline struct {
- Events []Event `json:"events"`
- Limited bool `json:"limited"`
- PrevBatch string `json:"prev_batch"`
- } `json:"timeline"`
- } `json:"join"`
- Invite map[string]struct {
- State struct {
- Events []Event
- } `json:"invite_state"`
- } `json:"invite"`
- } `json:"rooms"`
-}
diff --git a/vendor/src/github.com/matrix-org/gomatrix/room.go b/vendor/src/github.com/matrix-org/gomatrix/room.go
deleted file mode 100644
index 0533b3e..0000000
--- a/vendor/src/github.com/matrix-org/gomatrix/room.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package gomatrix
-
-// Room represents a single Matrix room.
-type Room struct {
- ID string
- State map[string]map[string]*Event
-}
-
-// UpdateState updates the room's current state with the given Event. This will clobber events based
-// on the type/state_key combination.
-func (room Room) UpdateState(event *Event) {
- _, exists := room.State[event.Type]
- if !exists {
- room.State[event.Type] = make(map[string]*Event)
- }
- room.State[event.Type][event.StateKey] = event
-}
-
-// GetStateEvent returns the state event for the given type/state_key combo, or nil.
-func (room Room) GetStateEvent(eventType string, stateKey string) *Event {
- stateEventMap, _ := room.State[eventType]
- event, _ := stateEventMap[stateKey]
- return event
-}
-
-// GetMembershipState returns the membership state of the given user ID in this room. If there is
-// no entry for this member, 'leave' is returned for consistency with left users.
-func (room Room) GetMembershipState(userID string) string {
- state := "leave"
- event := room.GetStateEvent("m.room.member", userID)
- if event != nil {
- membershipState, found := event.Content["membership"]
- if found {
- mState, isString := membershipState.(string)
- if isString {
- state = mState
- }
- }
- }
- return state
-}
-
-// NewRoom creates a new Room with the given ID
-func NewRoom(roomID string) *Room {
- // Init the State map and return a pointer to the Room
- return &Room{
- ID: roomID,
- State: make(map[string]map[string]*Event),
- }
-}
diff --git a/vendor/src/github.com/matrix-org/gomatrix/store.go b/vendor/src/github.com/matrix-org/gomatrix/store.go
deleted file mode 100644
index 6dc687e..0000000
--- a/vendor/src/github.com/matrix-org/gomatrix/store.go
+++ /dev/null
@@ -1,65 +0,0 @@
-package gomatrix
-
-// Storer is an interface which must be satisfied to store client data.
-//
-// You can either write a struct which persists this data to disk, or you can use the
-// provided "InMemoryStore" which just keeps data around in-memory which is lost on
-// restarts.
-type Storer interface {
- SaveFilterID(userID, filterID string)
- LoadFilterID(userID string) string
- SaveNextBatch(userID, nextBatchToken string)
- LoadNextBatch(userID string) string
- SaveRoom(room *Room)
- LoadRoom(roomID string) *Room
-}
-
-// InMemoryStore implements the Storer interface.
-//
-// Everything is persisted in-memory as maps. It is not safe to load/save filter IDs
-// or next batch tokens on any goroutine other than the syncing goroutine: the one
-// which called Client.Sync().
-type InMemoryStore struct {
- Filters map[string]string
- NextBatch map[string]string
- Rooms map[string]*Room
-}
-
-// SaveFilterID to memory.
-func (s *InMemoryStore) SaveFilterID(userID, filterID string) {
- s.Filters[userID] = filterID
-}
-
-// LoadFilterID from memory.
-func (s *InMemoryStore) LoadFilterID(userID string) string {
- return s.Filters[userID]
-}
-
-// SaveNextBatch to memory.
-func (s *InMemoryStore) SaveNextBatch(userID, nextBatchToken string) {
- s.NextBatch[userID] = nextBatchToken
-}
-
-// LoadNextBatch from memory.
-func (s *InMemoryStore) LoadNextBatch(userID string) string {
- return s.NextBatch[userID]
-}
-
-// SaveRoom to memory.
-func (s *InMemoryStore) SaveRoom(room *Room) {
- s.Rooms[room.ID] = room
-}
-
-// LoadRoom from memory.
-func (s *InMemoryStore) LoadRoom(roomID string) *Room {
- return s.Rooms[roomID]
-}
-
-// NewInMemoryStore constructs a new InMemoryStore.
-func NewInMemoryStore() *InMemoryStore {
- return &InMemoryStore{
- Filters: make(map[string]string),
- NextBatch: make(map[string]string),
- Rooms: make(map[string]*Room),
- }
-}
diff --git a/vendor/src/github.com/matrix-org/gomatrix/sync.go b/vendor/src/github.com/matrix-org/gomatrix/sync.go
deleted file mode 100644
index 347e5dc..0000000
--- a/vendor/src/github.com/matrix-org/gomatrix/sync.go
+++ /dev/null
@@ -1,154 +0,0 @@
-package gomatrix
-
-import (
- "encoding/json"
- "fmt"
- "runtime/debug"
- "time"
-)
-
-// Syncer represents an interface that must be satisfied in order to do /sync requests on a client.
-type Syncer interface {
- // Process the /sync response. The since parameter is the since= value that was used to produce the response.
- // This is useful for detecting the very first sync (since=""). If an error is return, Syncing will be stopped
- // permanently.
- ProcessResponse(resp *RespSync, since string) error
- // OnFailedSync returns either the time to wait before retrying or an error to stop syncing permanently.
- OnFailedSync(res *RespSync, err error) (time.Duration, error)
- // GetFilterJSON for the given user ID. NOT the filter ID.
- GetFilterJSON(userID string) json.RawMessage
-}
-
-// DefaultSyncer is the default syncing implementation. You can either write your own syncer, or selectively
-// replace parts of this default syncer (e.g. the ProcessResponse method). The default syncer uses the observer
-// pattern to notify callers about incoming events. See DefaultSyncer.OnEventType for more information.
-type DefaultSyncer struct {
- UserID string
- Store Storer
- listeners map[string][]OnEventListener // event type to listeners array
-}
-
-// OnEventListener can be used with DefaultSyncer.OnEventType to be informed of incoming events.
-type OnEventListener func(*Event)
-
-// NewDefaultSyncer returns an instantiated DefaultSyncer
-func NewDefaultSyncer(userID string, store Storer) *DefaultSyncer {
- return &DefaultSyncer{
- UserID: userID,
- Store: store,
- listeners: make(map[string][]OnEventListener),
- }
-}
-
-// ProcessResponse processes the /sync response in a way suitable for bots. "Suitable for bots" means a stream of
-// unrepeating events. Returns a fatal error if a listener panics.
-func (s *DefaultSyncer) ProcessResponse(res *RespSync, since string) (err error) {
- if !s.shouldProcessResponse(res, since) {
- return
- }
-
- defer func() {
- if r := recover(); r != nil {
- err = fmt.Errorf("ProcessResponse panicked! userID=%s since=%s panic=%s\n%s", s.UserID, since, r, debug.Stack())
- }
- }()
-
- for roomID, roomData := range res.Rooms.Join {
- room := s.getOrCreateRoom(roomID)
- for _, event := range roomData.State.Events {
- event.RoomID = roomID
- room.UpdateState(&event)
- s.notifyListeners(&event)
- }
- for _, event := range roomData.Timeline.Events {
- event.RoomID = roomID
- s.notifyListeners(&event)
- }
- }
- for roomID, roomData := range res.Rooms.Invite {
- room := s.getOrCreateRoom(roomID)
- for _, event := range roomData.State.Events {
- event.RoomID = roomID
- room.UpdateState(&event)
- s.notifyListeners(&event)
- }
- }
- return
-}
-
-// OnEventType allows callers to be notified when there are new events for the given event type.
-// There are no duplicate checks.
-func (s *DefaultSyncer) OnEventType(eventType string, callback OnEventListener) {
- _, exists := s.listeners[eventType]
- if !exists {
- s.listeners[eventType] = []OnEventListener{}
- }
- s.listeners[eventType] = append(s.listeners[eventType], callback)
-}
-
-// shouldProcessResponse returns true if the response should be processed. May modify the response to remove
-// stuff that shouldn't be processed.
-func (s *DefaultSyncer) shouldProcessResponse(resp *RespSync, since string) bool {
- if since == "" {
- return false
- }
- // This is a horrible hack because /sync will return the most recent messages for a room
- // as soon as you /join it. We do NOT want to process those events in that particular room
- // because they may have already been processed (if you toggle the bot in/out of the room).
- //
- // Work around this by inspecting each room's timeline and seeing if an m.room.member event for us
- // exists and is "join" and then discard processing that room entirely if so.
- // TODO: We probably want to process messages from after the last join event in the timeline.
- for roomID, roomData := range resp.Rooms.Join {
- for i := len(roomData.Timeline.Events) - 1; i >= 0; i-- {
- e := roomData.Timeline.Events[i]
- if e.Type == "m.room.member" && e.StateKey == s.UserID {
- m := e.Content["membership"]
- mship, ok := m.(string)
- if !ok {
- continue
- }
- if mship == "join" {
- _, ok := resp.Rooms.Join[roomID]
- if !ok {
- continue
- }
- delete(resp.Rooms.Join, roomID) // don't re-process messages
- delete(resp.Rooms.Invite, roomID) // don't re-process invites
- break
- }
- }
- }
- }
- return true
-}
-
-// getOrCreateRoom must only be called by the Sync() goroutine which calls ProcessResponse()
-func (s *DefaultSyncer) getOrCreateRoom(roomID string) *Room {
- room := s.Store.LoadRoom(roomID)
- if room == nil { // create a new Room
- room = NewRoom(roomID)
- s.Store.SaveRoom(room)
- }
- return room
-}
-
-func (s *DefaultSyncer) notifyListeners(event *Event) {
- listeners, exists := s.listeners[event.Type]
- if !exists {
- return
- }
- for _, fn := range listeners {
- fn(event)
- }
-}
-
-// OnFailedSync always returns a 10 second wait period between failed /syncs, never a fatal error.
-func (s *DefaultSyncer) OnFailedSync(res *RespSync, err error) (time.Duration, error) {
- return 10 * time.Second, nil
-}
-
-// GetFilterJSON returns a filter with a timeline limit of 50.
-func (s *DefaultSyncer) GetFilterJSON(userID string) json.RawMessage {
- return json.RawMessage(`{"room":{"timeline":{"limit":50}}}`)
-}
diff --git a/vendor/src/github.com/matrix-org/util/LICENSE b/vendor/src/github.com/matrix-org/util/LICENSE
deleted file mode 100644
index 8dada3e..0000000
--- a/vendor/src/github.com/matrix-org/util/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "{}"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright {yyyy} {name of copyright owner}
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/src/github.com/matrix-org/util/README.md b/vendor/src/github.com/matrix-org/util/README.md
deleted file mode 100644
index 319e4b5..0000000
--- a/vendor/src/github.com/matrix-org/util/README.md
+++ /dev/null
@@ -1,7 +0,0 @@
-# util
-
-[![GoDoc](https://godoc.org/github.com/matrix-org/util?status.svg)](https://godoc.org/github.com/matrix-org/util)
-[![Build Status](https://travis-ci.org/matrix-org/util.svg?branch=master)](https://travis-ci.org/matrix-org/util)
-[![Coverage Status](https://coveralls.io/repos/github/matrix-org/util/badge.svg)](https://coveralls.io/github/matrix-org/util)
-
-A loose collection of Golang functions that we use at matrix.org
diff --git a/vendor/src/github.com/matrix-org/util/context.go b/vendor/src/github.com/matrix-org/util/context.go
deleted file mode 100644
index d8def4f..0000000
--- a/vendor/src/github.com/matrix-org/util/context.go
+++ /dev/null
@@ -1,35 +0,0 @@
-package util
-
-import (
- "context"
-
- log "github.com/Sirupsen/logrus"
-)
-
-// contextKeys is a type alias for string to namespace Context keys per-package.
-type contextKeys string
-
-// ctxValueRequestID is the key to extract the request ID for an HTTP request
-const ctxValueRequestID = contextKeys("requestid")
-
-// GetRequestID returns the request ID associated with this context, or the empty string
-// if one is not associated with this context.
-func GetRequestID(ctx context.Context) string {
- id := ctx.Value(ctxValueRequestID)
- if id == nil {
- return ""
- }
- return id.(string)
-}
-
-// ctxValueLogger is the key to extract the logrus Logger.
-const ctxValueLogger = contextKeys("logger")
-
-// GetLogger retrieves the logrus logger from the supplied context. Returns nil if there is no logger.
-func GetLogger(ctx context.Context) *log.Entry {
- l := ctx.Value(ctxValueLogger)
- if l == nil {
- return nil
- }
- return l.(*log.Entry)
-}
diff --git a/vendor/src/github.com/matrix-org/util/hooks/install.sh b/vendor/src/github.com/matrix-org/util/hooks/install.sh
deleted file mode 100644
index f8aa331..0000000
--- a/vendor/src/github.com/matrix-org/util/hooks/install.sh
+++ /dev/null
@@ -1,5 +0,0 @@
-#! /bin/bash
-
-DOT_GIT="$(dirname $0)/../.git"
-
-ln -s "../../hooks/pre-commit" "$DOT_GIT/hooks/pre-commit"
\ No newline at end of file
diff --git a/vendor/src/github.com/matrix-org/util/hooks/pre-commit b/vendor/src/github.com/matrix-org/util/hooks/pre-commit
deleted file mode 100644
index 41df674..0000000
--- a/vendor/src/github.com/matrix-org/util/hooks/pre-commit
+++ /dev/null
@@ -1,9 +0,0 @@
-#! /bin/bash
-
-set -eu
-
-golint
-go fmt
-go tool vet --all --shadow .
-gocyclo -over 12 .
-go test -timeout 5s -test.v
diff --git a/vendor/src/github.com/matrix-org/util/json.go b/vendor/src/github.com/matrix-org/util/json.go
deleted file mode 100644
index b0834ea..0000000
--- a/vendor/src/github.com/matrix-org/util/json.go
+++ /dev/null
@@ -1,166 +0,0 @@
-package util
-
-import (
- "context"
- "encoding/json"
- "math/rand"
- "net/http"
- "runtime/debug"
- "time"
-
- log "github.com/Sirupsen/logrus"
-)
-
-// JSONResponse represents an HTTP response which contains a JSON body.
-type JSONResponse struct {
- // HTTP status code.
- Code int
- // JSON represents the JSON that should be serialized and sent to the client
- JSON interface{}
- // Headers represent any headers that should be sent to the client
- Headers map[string]string
-}
-
-// Is2xx returns true if the Code is between 200 and 299.
-func (r JSONResponse) Is2xx() bool {
- return r.Code/100 == 2
-}
-
-// RedirectResponse returns a JSONResponse which 302s the client to the given location.
-func RedirectResponse(location string) JSONResponse {
- headers := make(map[string]string)
- headers["Location"] = location
- return JSONResponse{
- Code: 302,
- JSON: struct{}{},
- Headers: headers,
- }
-}
-
-// MessageResponse returns a JSONResponse with a 'message' key containing the given text.
-func MessageResponse(code int, msg string) JSONResponse {
- return JSONResponse{
- Code: code,
- JSON: struct {
- Message string `json:"message"`
- }{msg},
- }
-}
-
-// ErrorResponse returns an HTTP 500 JSONResponse with the stringified form of the given error.
-func ErrorResponse(err error) JSONResponse {
- return MessageResponse(500, err.Error())
-}
-
-// JSONRequestHandler represents an interface that must be satisfied in order to respond to incoming
-// HTTP requests with JSON.
-type JSONRequestHandler interface {
- OnIncomingRequest(req *http.Request) JSONResponse
-}
-
-// Protect panicking HTTP requests from taking down the entire process, and log them using
-// the correct logger, returning a 500 with a JSON response rather than abruptly closing the
-// connection. The http.Request MUST have a ctxValueLogger.
-func Protect(handler http.HandlerFunc) http.HandlerFunc {
- return func(w http.ResponseWriter, req *http.Request) {
- defer func() {
- if r := recover(); r != nil {
- logger := req.Context().Value(ctxValueLogger).(*log.Entry)
- logger.WithFields(log.Fields{
- "panic": r,
- }).Errorf(
- "Request panicked!\n%s", debug.Stack(),
- )
- respond(w, req, MessageResponse(500, "Internal Server Error"))
- }
- }()
- handler(w, req)
- }
-}
-
-// MakeJSONAPI creates an HTTP handler which always responds to incoming requests with JSON responses.
-// Incoming http.Requests will have a logger (with a request ID/method/path logged) attached to the Context.
-// This can be accessed via GetLogger(Context).
-func MakeJSONAPI(handler JSONRequestHandler) http.HandlerFunc {
- return Protect(func(w http.ResponseWriter, req *http.Request) {
- reqID := RandomString(12)
- // Set a Logger and request ID on the context
- ctx := context.WithValue(req.Context(), ctxValueLogger, log.WithFields(log.Fields{
- "req.method": req.Method,
- "req.path": req.URL.Path,
- "req.id": reqID,
- }))
- ctx = context.WithValue(ctx, ctxValueRequestID, reqID)
- req = req.WithContext(ctx)
-
- logger := req.Context().Value(ctxValueLogger).(*log.Entry)
- logger.Print("Incoming request")
-
- res := handler.OnIncomingRequest(req)
-
- // Set common headers returned regardless of the outcome of the request
- w.Header().Set("Content-Type", "application/json")
- SetCORSHeaders(w)
-
- respond(w, req, res)
- })
-}
-
-func respond(w http.ResponseWriter, req *http.Request, res JSONResponse) {
- logger := req.Context().Value(ctxValueLogger).(*log.Entry)
-
- // Set custom headers
- if res.Headers != nil {
- for h, val := range res.Headers {
- w.Header().Set(h, val)
- }
- }
-
- // Marshal JSON response into raw bytes to send as the HTTP body
- resBytes, err := json.Marshal(res.JSON)
- if err != nil {
- logger.WithError(err).Error("Failed to marshal JSONResponse")
- // this should never fail to be marshalled so drop err to the floor
- res = MessageResponse(500, "Internal Server Error")
- resBytes, _ = json.Marshal(res.JSON)
- }
-
- // Set status code and write the body
- w.WriteHeader(res.Code)
- logger.WithField("code", res.Code).Infof("Responding (%d bytes)", len(resBytes))
- w.Write(resBytes)
-}
-
-// WithCORSOptions intercepts all OPTIONS requests and responds with CORS headers. The request handler
-// is not invoked when this happens.
-func WithCORSOptions(handler http.HandlerFunc) http.HandlerFunc {
- return func(w http.ResponseWriter, req *http.Request) {
- if req.Method == "OPTIONS" {
- SetCORSHeaders(w)
- return
- }
- handler(w, req)
- }
-}
-
-// SetCORSHeaders sets unrestricted origin Access-Control headers on the response writer
-func SetCORSHeaders(w http.ResponseWriter) {
- w.Header().Set("Access-Control-Allow-Origin", "*")
- w.Header().Set("Access-Control-Allow-Methods", "GET, POST, PUT, DELETE, OPTIONS")
- w.Header().Set("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept")
-}
-
-const alphanumerics = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
-
-// RandomString generates a pseudo-random string of length n.
-func RandomString(n int) string {
- b := make([]byte, n)
- for i := range b {
- b[i] = alphanumerics[rand.Int63()%int64(len(alphanumerics))]
- }
- return string(b)
-}
-
-func init() {
- rand.Seed(time.Now().UTC().UnixNano())
-}
diff --git a/vendor/src/github.com/matrix-org/util/json_test.go b/vendor/src/github.com/matrix-org/util/json_test.go
deleted file mode 100644
index 687db27..0000000
--- a/vendor/src/github.com/matrix-org/util/json_test.go
+++ /dev/null
@@ -1,239 +0,0 @@
-package util
-
-import (
- "context"
- "errors"
- "net/http"
- "net/http/httptest"
- "testing"
-
- log "github.com/Sirupsen/logrus"
-)
-
-type MockJSONRequestHandler struct {
- handler func(req *http.Request) JSONResponse
-}
-
-func (h *MockJSONRequestHandler) OnIncomingRequest(req *http.Request) JSONResponse {
- return h.handler(req)
-}
-
-type MockResponse struct {
- Foo string `json:"foo"`
-}
-
-func TestMakeJSONAPI(t *testing.T) {
- log.SetLevel(log.PanicLevel) // suppress logs in test output
- tests := []struct {
- Return JSONResponse
- ExpectCode int
- ExpectJSON string
- }{
- // MessageResponse return values
- {MessageResponse(500, "Everything is broken"), 500, `{"message":"Everything is broken"}`},
- // interface return values
- {JSONResponse{500, MockResponse{"yep"}, nil}, 500, `{"foo":"yep"}`},
- // Error JSON return values which fail to be marshalled should fallback to text
- {JSONResponse{500, struct {
- Foo interface{} `json:"foo"`
- }{func(cannotBe, marshalled string) {}}, nil}, 500, `{"message":"Internal Server Error"}`},
- // With different status codes
- {JSONResponse{201, MockResponse{"narp"}, nil}, 201, `{"foo":"narp"}`},
- // Top-level array success values
- {JSONResponse{200, []MockResponse{{"yep"}, {"narp"}}, nil}, 200, `[{"foo":"yep"},{"foo":"narp"}]`},
- }
-
- for _, tst := range tests {
- mock := MockJSONRequestHandler{func(req *http.Request) JSONResponse {
- return tst.Return
- }}
- mockReq, _ := http.NewRequest("GET", "http://example.com/foo", nil)
- mockWriter := httptest.NewRecorder()
- handlerFunc := MakeJSONAPI(&mock)
- handlerFunc(mockWriter, mockReq)
- if mockWriter.Code != tst.ExpectCode {
- t.Errorf("TestMakeJSONAPI wanted HTTP status %d, got %d", tst.ExpectCode, mockWriter.Code)
- }
- actualBody := mockWriter.Body.String()
- if actualBody != tst.ExpectJSON {
- t.Errorf("TestMakeJSONAPI wanted body '%s', got '%s'", tst.ExpectJSON, actualBody)
- }
- }
-}
-
-func TestMakeJSONAPICustomHeaders(t *testing.T) {
- mock := MockJSONRequestHandler{func(req *http.Request) JSONResponse {
- headers := make(map[string]string)
- headers["Custom"] = "Thing"
- headers["X-Custom"] = "Things"
- return JSONResponse{
- Code: 200,
- JSON: MockResponse{"yep"},
- Headers: headers,
- }
- }}
- mockReq, _ := http.NewRequest("GET", "http://example.com/foo", nil)
- mockWriter := httptest.NewRecorder()
- handlerFunc := MakeJSONAPI(&mock)
- handlerFunc(mockWriter, mockReq)
- if mockWriter.Code != 200 {
- t.Errorf("TestMakeJSONAPICustomHeaders wanted HTTP status 200, got %d", mockWriter.Code)
- }
- h := mockWriter.Header().Get("Custom")
- if h != "Thing" {
- t.Errorf("TestMakeJSONAPICustomHeaders wanted header 'Custom: Thing' , got 'Custom: %s'", h)
- }
- h = mockWriter.Header().Get("X-Custom")
- if h != "Things" {
- t.Errorf("TestMakeJSONAPICustomHeaders wanted header 'X-Custom: Things' , got 'X-Custom: %s'", h)
- }
-}
-
-func TestMakeJSONAPIRedirect(t *testing.T) {
- log.SetLevel(log.PanicLevel) // suppress logs in test output
- mock := MockJSONRequestHandler{func(req *http.Request) JSONResponse {
- return RedirectResponse("https://matrix.org")
- }}
- mockReq, _ := http.NewRequest("GET", "http://example.com/foo", nil)
- mockWriter := httptest.NewRecorder()
- handlerFunc := MakeJSONAPI(&mock)
- handlerFunc(mockWriter, mockReq)
- if mockWriter.Code != 302 {
- t.Errorf("TestMakeJSONAPIRedirect wanted HTTP status 302, got %d", mockWriter.Code)
- }
- location := mockWriter.Header().Get("Location")
- if location != "https://matrix.org" {
- t.Errorf("TestMakeJSONAPIRedirect wanted Location header 'https://matrix.org', got '%s'", location)
- }
-}
-
-func TestMakeJSONAPIError(t *testing.T) {
- log.SetLevel(log.PanicLevel) // suppress logs in test output
- mock := MockJSONRequestHandler{func(req *http.Request) JSONResponse {
- err := errors.New("oops")
- return ErrorResponse(err)
- }}
- mockReq, _ := http.NewRequest("GET", "http://example.com/foo", nil)
- mockWriter := httptest.NewRecorder()
- handlerFunc := MakeJSONAPI(&mock)
- handlerFunc(mockWriter, mockReq)
- if mockWriter.Code != 500 {
- t.Errorf("TestMakeJSONAPIError wanted HTTP status 500, got %d", mockWriter.Code)
- }
- actualBody := mockWriter.Body.String()
- expect := `{"message":"oops"}`
- if actualBody != expect {
- t.Errorf("TestMakeJSONAPIError wanted body '%s', got '%s'", expect, actualBody)
- }
-}
-
-func TestIs2xx(t *testing.T) {
- tests := []struct {
- Code int
- Expect bool
- }{
- {200, true},
- {201, true},
- {299, true},
- {300, false},
- {199, false},
- {0, false},
- {500, false},
- }
- for _, test := range tests {
- j := JSONResponse{
- Code: test.Code,
- }
- actual := j.Is2xx()
- if actual != test.Expect {
- t.Errorf("TestIs2xx wanted %t, got %t", test.Expect, actual)
- }
- }
-}
-
-func TestGetLogger(t *testing.T) {
- log.SetLevel(log.PanicLevel) // suppress logs in test output
- entry := log.WithField("test", "yep")
- mockReq, _ := http.NewRequest("GET", "http://example.com/foo", nil)
- ctx := context.WithValue(mockReq.Context(), ctxValueLogger, entry)
- mockReq = mockReq.WithContext(ctx)
- ctxLogger := GetLogger(mockReq.Context())
- if ctxLogger != entry {
- t.Errorf("TestGetLogger wanted logger '%v', got '%v'", entry, ctxLogger)
- }
-
- noLoggerInReq, _ := http.NewRequest("GET", "http://example.com/foo", nil)
- ctxLogger = GetLogger(noLoggerInReq.Context())
- if ctxLogger != nil {
- t.Errorf("TestGetLogger wanted nil logger, got '%v'", ctxLogger)
- }
-}
-
-func TestProtect(t *testing.T) {
- log.SetLevel(log.PanicLevel) // suppress logs in test output
- mockWriter := httptest.NewRecorder()
- mockReq, _ := http.NewRequest("GET", "http://example.com/foo", nil)
- mockReq = mockReq.WithContext(
- context.WithValue(mockReq.Context(), ctxValueLogger, log.WithField("test", "yep")),
- )
- h := Protect(func(w http.ResponseWriter, req *http.Request) {
- panic("oh noes!")
- })
-
- h(mockWriter, mockReq)
-
- expectCode := 500
- if mockWriter.Code != expectCode {
- t.Errorf("TestProtect wanted HTTP status %d, got %d", expectCode, mockWriter.Code)
- }
-
- expectBody := `{"message":"Internal Server Error"}`
- actualBody := mockWriter.Body.String()
- if actualBody != expectBody {
- t.Errorf("TestProtect wanted body %s, got %s", expectBody, actualBody)
- }
-}
-
-func TestWithCORSOptions(t *testing.T) {
- log.SetLevel(log.PanicLevel) // suppress logs in test output
- mockWriter := httptest.NewRecorder()
- mockReq, _ := http.NewRequest("OPTIONS", "http://example.com/foo", nil)
- h := WithCORSOptions(func(w http.ResponseWriter, req *http.Request) {
- w.WriteHeader(200)
- w.Write([]byte("yep"))
- })
- h(mockWriter, mockReq)
- if mockWriter.Code != 200 {
- t.Errorf("TestWithCORSOptions wanted HTTP status 200, got %d", mockWriter.Code)
- }
-
- origin := mockWriter.Header().Get("Access-Control-Allow-Origin")
- if origin != "*" {
- t.Errorf("TestWithCORSOptions wanted Access-Control-Allow-Origin header '*', got '%s'", origin)
- }
-
- // OPTIONS request shouldn't hit the handler func
- expectBody := ""
- actualBody := mockWriter.Body.String()
- if actualBody != expectBody {
- t.Errorf("TestWithCORSOptions wanted body %s, got %s", expectBody, actualBody)
- }
-}
-
-func TestGetRequestID(t *testing.T) {
- log.SetLevel(log.PanicLevel) // suppress logs in test output
- reqID := "alphabetsoup"
- mockReq, _ := http.NewRequest("GET", "http://example.com/foo", nil)
- ctx := context.WithValue(mockReq.Context(), ctxValueRequestID, reqID)
- mockReq = mockReq.WithContext(ctx)
- ctxReqID := GetRequestID(mockReq.Context())
- if reqID != ctxReqID {
- t.Errorf("TestGetRequestID wanted request ID '%s', got '%s'", reqID, ctxReqID)
- }
-
- noReqIDInReq, _ := http.NewRequest("GET", "http://example.com/foo", nil)
- ctxReqID = GetRequestID(noReqIDInReq.Context())
- if ctxReqID != "" {
- t.Errorf("TestGetRequestID wanted empty request ID, got '%s'", ctxReqID)
- }
-}
diff --git a/vendor/src/github.com/mattn/go-shellwords/README.md b/vendor/src/github.com/mattn/go-shellwords/README.md
deleted file mode 100644
index 56f357f..0000000
--- a/vendor/src/github.com/mattn/go-shellwords/README.md
+++ /dev/null
@@ -1,47 +0,0 @@
-# go-shellwords
-
-[![Coverage Status](https://coveralls.io/repos/mattn/go-shellwords/badge.png?branch=master)](https://coveralls.io/r/mattn/go-shellwords?branch=master)
-[![Build Status](https://travis-ci.org/mattn/go-shellwords.svg?branch=master)](https://travis-ci.org/mattn/go-shellwords)
-
-Parse line as shell words.
-
-## Usage
-
-```go
-args, err := shellwords.Parse("./foo --bar=baz")
-// args should be ["./foo", "--bar=baz"]
-```
-
-```go
-os.Setenv("FOO", "bar")
-p := shellwords.NewParser()
-p.ParseEnv = true
-args, err := p.Parse("./foo $FOO")
-// args should be ["./foo", "bar"]
-```
-
-```go
-p := shellwords.NewParser()
-p.ParseBacktick = true
-args, err := p.Parse("./foo `echo $SHELL`")
-// args should be ["./foo", "/bin/bash"]
-```
-
-```go
-shellwords.ParseBacktick = true
-p := shellwords.NewParser()
-args, err := p.Parse("./foo `echo $SHELL`")
-// args should be ["./foo", "/bin/bash"]
-```
-
-# Thanks
-
-This is based on cpan module [Parse::CommandLine](https://metacpan.org/pod/Parse::CommandLine).
-
-# License
-
-under the MIT License: http://mattn.mit-license.org/2014
-
-# Author
-
-Yasuhiro Matsumoto (a.k.a mattn)
diff --git a/vendor/src/github.com/mattn/go-shellwords/shellwords.go b/vendor/src/github.com/mattn/go-shellwords/shellwords.go
deleted file mode 100644
index 1abaa6c..0000000
--- a/vendor/src/github.com/mattn/go-shellwords/shellwords.go
+++ /dev/null
@@ -1,134 +0,0 @@
-package shellwords
-
-import (
- "errors"
- "os"
- "regexp"
- "strings"
-)
-
-var (
- ParseEnv bool = false
- ParseBacktick bool = false
-)
-
-var envRe = regexp.MustCompile(`\$({[a-zA-Z0-9_]+}|[a-zA-Z0-9_]+)`)
-
-func isSpace(r rune) bool {
- switch r {
- case ' ', '\t', '\r', '\n':
- return true
- }
- return false
-}
-
-func replaceEnv(s string) string {
- return envRe.ReplaceAllStringFunc(s, func(s string) string {
- s = s[1:]
- if s[0] == '{' {
- s = s[1 : len(s)-1]
- }
- return os.Getenv(s)
- })
-}
-
-type Parser struct {
- ParseEnv bool
- ParseBacktick bool
-}
-
-func NewParser() *Parser {
- return &Parser{ParseEnv, ParseBacktick}
-}
-
-func (p *Parser) Parse(line string) ([]string, error) {
- line = strings.TrimSpace(line)
-
- args := []string{}
- buf := ""
- var escaped, doubleQuoted, singleQuoted, backQuote bool
- backtick := ""
-
- for _, r := range line {
- if escaped {
- buf += string(r)
- escaped = false
- continue
- }
-
- if r == '\\' {
- if singleQuoted {
- buf += string(r)
- } else {
- escaped = true
- }
- continue
- }
-
- if isSpace(r) {
- if singleQuoted || doubleQuoted || backQuote {
- buf += string(r)
- backtick += string(r)
- } else if buf != "" {
- if p.ParseEnv {
- buf = replaceEnv(buf)
- }
- args = append(args, buf)
- buf = ""
- }
- continue
- }
-
- switch r {
- case '`':
- if !singleQuoted && !doubleQuoted {
- if p.ParseBacktick {
- if backQuote {
- out, err := shellRun(backtick)
- if err != nil {
- return nil, err
- }
- buf = out
- }
- backtick = ""
- backQuote = !backQuote
- continue
- }
- backtick = ""
- backQuote = !backQuote
- }
- case '"':
- if !singleQuoted {
- doubleQuoted = !doubleQuoted
- continue
- }
- case '\'':
- if !doubleQuoted {
- singleQuoted = !singleQuoted
- continue
- }
- }
-
- buf += string(r)
- if backQuote {
- backtick += string(r)
- }
- }
-
- if buf != "" {
- if p.ParseEnv {
- buf = replaceEnv(buf)
- }
- args = append(args, buf)
- }
-
- if escaped || singleQuoted || doubleQuoted || backQuote {
- return nil, errors.New("invalid command line string")
- }
-
- return args, nil
-}
-
-func Parse(line string) ([]string, error) {
- return NewParser().Parse(line)
-}
diff --git a/vendor/src/github.com/mattn/go-shellwords/shellwords_test.go b/vendor/src/github.com/mattn/go-shellwords/shellwords_test.go
deleted file mode 100644
index fb96d33..0000000
--- a/vendor/src/github.com/mattn/go-shellwords/shellwords_test.go
+++ /dev/null
@@ -1,132 +0,0 @@
-package shellwords
-
-import (
- "os"
- "reflect"
- "testing"
-)
-
-var testcases = []struct {
- line string
- expected []string
-}{
- {`var --bar=baz`, []string{`var`, `--bar=baz`}},
- {`var --bar="baz"`, []string{`var`, `--bar=baz`}},
- {`var "--bar=baz"`, []string{`var`, `--bar=baz`}},
- {`var "--bar='baz'"`, []string{`var`, `--bar='baz'`}},
- {"var --bar=`baz`", []string{`var`, "--bar=`baz`"}},
- {`var "--bar=\"baz'"`, []string{`var`, `--bar="baz'`}},
- {`var "--bar=\'baz\'"`, []string{`var`, `--bar='baz'`}},
- {`var --bar='\'`, []string{`var`, `--bar=\`}},
- {`var "--bar baz"`, []string{`var`, `--bar baz`}},
- {`var --"bar baz"`, []string{`var`, `--bar baz`}},
- {`var --"bar baz"`, []string{`var`, `--bar baz`}},
-}
-
-func TestSimple(t *testing.T) {
- for _, testcase := range testcases {
- args, err := Parse(testcase.line)
- if err != nil {
- t.Fatalf(err.Error())
- }
- if !reflect.DeepEqual(args, testcase.expected) {
- t.Fatalf("Expected %v, but %v:", testcase.expected, args)
- }
- }
-}
-
-func TestError(t *testing.T) {
- _, err := Parse("foo '")
- if err == nil {
- t.Fatalf("Should be an error")
- }
- _, err = Parse(`foo "`)
- if err == nil {
- t.Fatalf("Should be an error")
- }
-
- _, err = Parse("foo `")
- if err == nil {
- t.Fatalf("Should be an error")
- }
-}
-
-func TestBacktick(t *testing.T) {
- goversion, err := shellRun("go version")
- if err != nil {
- t.Fatalf(err.Error())
- }
-
- parser := NewParser()
- parser.ParseBacktick = true
- args, err := parser.Parse("echo `go version`")
- if err != nil {
- t.Fatalf(err.Error())
- }
- expected := []string{"echo", goversion}
- if !reflect.DeepEqual(args, expected) {
- t.Fatalf("Expected %v, but %v:", expected, args)
- }
-}
-
-func TestBacktickError(t *testing.T) {
- parser := NewParser()
- parser.ParseBacktick = true
- _, err := parser.Parse("echo `go Version`")
- if err == nil {
- t.Fatalf("Should be an error")
- }
-}
-
-func TestEnv(t *testing.T) {
- os.Setenv("FOO", "bar")
-
- parser := NewParser()
- parser.ParseEnv = true
- args, err := parser.Parse("echo $FOO")
- if err != nil {
- t.Fatalf(err.Error())
- }
- expected := []string{"echo", "bar"}
- if !reflect.DeepEqual(args, expected) {
- t.Fatalf("Expected %v, but %v:", expected, args)
- }
-}
-
-func TestNoEnv(t *testing.T) {
- parser := NewParser()
- parser.ParseEnv = true
- args, err := parser.Parse("echo $BAR")
- if err != nil {
- t.Fatalf(err.Error())
- }
- expected := []string{"echo", ""}
- if !reflect.DeepEqual(args, expected) {
- t.Fatalf("Expected %v, but %v:", expected, args)
- }
-}
-
-func TestDupEnv(t *testing.T) {
- os.Setenv("FOO", "bar")
- os.Setenv("FOO_BAR", "baz")
-
- parser := NewParser()
- parser.ParseEnv = true
- args, err := parser.Parse("echo $$FOO$")
- if err != nil {
- t.Fatalf(err.Error())
- }
- expected := []string{"echo", "$bar$"}
- if !reflect.DeepEqual(args, expected) {
- t.Fatalf("Expected %v, but %v:", expected, args)
- }
-
- args, err = parser.Parse("echo $${FOO_BAR}$")
- if err != nil {
- t.Fatalf(err.Error())
- }
- expected = []string{"echo", "$baz$"}
- if !reflect.DeepEqual(args, expected) {
- t.Fatalf("Expected %v, but %v:", expected, args)
- }
-}
diff --git a/vendor/src/github.com/mattn/go-shellwords/util_posix.go b/vendor/src/github.com/mattn/go-shellwords/util_posix.go
deleted file mode 100644
index 4f8ac55..0000000
--- a/vendor/src/github.com/mattn/go-shellwords/util_posix.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// +build !windows
-
-package shellwords
-
-import (
- "errors"
- "os"
- "os/exec"
- "strings"
-)
-
-func shellRun(line string) (string, error) {
- shell := os.Getenv("SHELL")
- b, err := exec.Command(shell, "-c", line).Output()
- if err != nil {
- return "", errors.New(err.Error() + ":" + string(b))
- }
- return strings.TrimSpace(string(b)), nil
-}
diff --git a/vendor/src/github.com/mattn/go-shellwords/util_windows.go b/vendor/src/github.com/mattn/go-shellwords/util_windows.go
deleted file mode 100644
index 7cad4cf..0000000
--- a/vendor/src/github.com/mattn/go-shellwords/util_windows.go
+++ /dev/null
@@ -1,17 +0,0 @@
-package shellwords
-
-import (
- "errors"
- "os"
- "os/exec"
- "strings"
-)
-
-func shellRun(line string) (string, error) {
- shell := os.Getenv("COMSPEC")
- b, err := exec.Command(shell, "/c", line).Output()
- if err != nil {
- return "", errors.New(err.Error() + ":" + string(b))
- }
- return strings.TrimSpace(string(b)), nil
-}
diff --git a/vendor/src/github.com/mattn/go-sqlite3/LICENSE b/vendor/src/github.com/mattn/go-sqlite3/LICENSE
deleted file mode 100644
index ca458bb..0000000
--- a/vendor/src/github.com/mattn/go-sqlite3/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2014 Yasuhiro Matsumoto
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/vendor/src/github.com/mattn/go-sqlite3/README.md b/vendor/src/github.com/mattn/go-sqlite3/README.md
deleted file mode 100644
index c2e0d5a..0000000
--- a/vendor/src/github.com/mattn/go-sqlite3/README.md
+++ /dev/null
@@ -1,81 +0,0 @@
-go-sqlite3
-==========
-
-[![Build Status](https://travis-ci.org/mattn/go-sqlite3.svg?branch=master)](https://travis-ci.org/mattn/go-sqlite3)
-[![Coverage Status](https://coveralls.io/repos/mattn/go-sqlite3/badge.svg?branch=master)](https://coveralls.io/r/mattn/go-sqlite3?branch=master)
-[![GoDoc](https://godoc.org/github.com/mattn/go-sqlite3?status.svg)](http://godoc.org/github.com/mattn/go-sqlite3)
-
-Description
------------
-
-sqlite3 driver conforming to the built-in database/sql interface
-
-Installation
-------------
-
-This package can be installed with the go get command:
-
- go get github.com/mattn/go-sqlite3
-
-_go-sqlite3_ is *cgo* package.
-If you want to build your app using go-sqlite3, you need gcc.
-However, if you install _go-sqlite3_ with `go install github.com/mattn/go-sqlite3`, you don't need gcc to build your app anymore.
-
-Documentation
--------------
-
-API documentation can be found here: http://godoc.org/github.com/mattn/go-sqlite3
-
-Examples can be found under the `./_example` directory
-
-FAQ
----
-
-* Want to build go-sqlite3 with libsqlite3 on my linux.
-
- Use `go build --tags "libsqlite3 linux"`
-
-* Want to build go-sqlite3 with libsqlite3 on OS X.
-
- Install sqlite3 from homebrew: `brew install sqlite3`
-
- Use `go build --tags "libsqlite3 darwin"`
-
-* Want to build go-sqlite3 with icu extension.
-
- Use `go build --tags "icu"`
-
-* Can't build go-sqlite3 on windows 64bit.
-
- > Probably, you are using go 1.0, go1.0 has a problem when it comes to compiling/linking on windows 64bit.
- > See: https://github.com/mattn/go-sqlite3/issues/27
-
-* Getting insert error while query is opened.
-
- > You can pass some arguments into the connection string, for example, a URI.
- > See: https://github.com/mattn/go-sqlite3/issues/39
-
-* Do you want to cross compile? mingw on Linux or Mac?
-
- > See: https://github.com/mattn/go-sqlite3/issues/106
- > See also: http://www.limitlessfx.com/cross-compile-golang-app-for-windows-from-linux.html
-
-* Want to get time.Time with current locale
-
- Use `loc=auto` in SQLite3 filename schema like `file:foo.db?loc=auto`.
-
-License
--------
-
-MIT: http://mattn.mit-license.org/2012
-
-sqlite3-binding.c, sqlite3-binding.h, sqlite3ext.h
-
-The -binding suffix was added to avoid build failures under gccgo.
-
-In this repository, those files are an amalgamation of code that was copied from SQLite3. The license of that code is the same as the license of SQLite3.
-
-Author
-------
-
-Yasuhiro Matsumoto (a.k.a mattn)
diff --git a/vendor/src/github.com/mattn/go-sqlite3/_example/custom_func/main.go b/vendor/src/github.com/mattn/go-sqlite3/_example/custom_func/main.go
deleted file mode 100644
index 85657e6..0000000
--- a/vendor/src/github.com/mattn/go-sqlite3/_example/custom_func/main.go
+++ /dev/null
@@ -1,133 +0,0 @@
-package main
-
-import (
- "database/sql"
- "fmt"
- "log"
- "math"
- "math/rand"
-
- sqlite "github.com/mattn/go-sqlite3"
-)
-
-// Computes x^y
-func pow(x, y int64) int64 {
- return int64(math.Pow(float64(x), float64(y)))
-}
-
-// Computes the bitwise exclusive-or of all its arguments
-func xor(xs ...int64) int64 {
- var ret int64
- for _, x := range xs {
- ret ^= x
- }
- return ret
-}
-
-// Returns a random number. It's actually deterministic here because
-// we don't seed the RNG, but it's an example of a non-pure function
-// from SQLite's POV.
-func getrand() int64 {
- return rand.Int63()
-}
-
-// Computes the standard deviation of a GROUPed BY set of values
-type stddev struct {
- xs []int64
- // Running average calculation
- sum int64
- n int64
-}
-
-func newStddev() *stddev { return &stddev{} }
-
-func (s *stddev) Step(x int64) {
- s.xs = append(s.xs, x)
- s.sum += x
- s.n++
-}
-
-func (s *stddev) Done() float64 {
- mean := float64(s.sum) / float64(s.n)
- var sqDiff []float64
- for _, x := range s.xs {
- sqDiff = append(sqDiff, math.Pow(float64(x)-mean, 2))
- }
- var dev float64
- for _, x := range sqDiff {
- dev += x
- }
- dev /= float64(len(sqDiff))
- return math.Sqrt(dev)
-}
-
-func main() {
- sql.Register("sqlite3_custom", &sqlite.SQLiteDriver{
- ConnectHook: func(conn *sqlite.SQLiteConn) error {
- if err := conn.RegisterFunc("pow", pow, true); err != nil {
- return err
- }
- if err := conn.RegisterFunc("xor", xor, true); err != nil {
- return err
- }
- if err := conn.RegisterFunc("rand", getrand, false); err != nil {
- return err
- }
- if err := conn.RegisterAggregator("stddev", newStddev, true); err != nil {
- return err
- }
- return nil
- },
- })
-
- db, err := sql.Open("sqlite3_custom", ":memory:")
- if err != nil {
- log.Fatal("Failed to open database:", err)
- }
- defer db.Close()
-
- var i int64
- err = db.QueryRow("SELECT pow(2,3)").Scan(&i)
- if err != nil {
- log.Fatal("POW query error:", err)
- }
- fmt.Println("pow(2,3) =", i) // 8
-
- err = db.QueryRow("SELECT xor(1,2,3,4,5,6)").Scan(&i)
- if err != nil {
- log.Fatal("XOR query error:", err)
- }
- fmt.Println("xor(1,2,3,4,5) =", i) // 7
-
- err = db.QueryRow("SELECT rand()").Scan(&i)
- if err != nil {
- log.Fatal("RAND query error:", err)
- }
- fmt.Println("rand() =", i) // pseudorandom
-
- _, err = db.Exec("create table foo (department integer, profits integer)")
- if err != nil {
- log.Fatal("Failed to create table:", err)
- }
- _, err = db.Exec("insert into foo values (1, 10), (1, 20), (1, 45), (2, 42), (2, 115)")
- if err != nil {
- log.Fatal("Failed to insert records:", err)
- }
-
- rows, err := db.Query("select department, stddev(profits) from foo group by department")
- if err != nil {
- log.Fatal("STDDEV query error:", err)
- }
- defer rows.Close()
- for rows.Next() {
- var dept int64
- var dev float64
- if err := rows.Scan(&dept, &dev); err != nil {
- log.Fatal(err)
- }
- fmt.Printf("dept=%d stddev=%f\n", dept, dev)
- }
- if err := rows.Err(); err != nil {
- log.Fatal(err)
- }
-}
diff --git a/vendor/src/github.com/mattn/go-sqlite3/_example/hook/hook.go b/vendor/src/github.com/mattn/go-sqlite3/_example/hook/hook.go
deleted file mode 100644
index 3059f9e..0000000
--- a/vendor/src/github.com/mattn/go-sqlite3/_example/hook/hook.go
+++ /dev/null
@@ -1,71 +0,0 @@
-package main
-
-import (
- "database/sql"
- "github.com/mattn/go-sqlite3"
- "log"
- "os"
-)
-
-func main() {
- sqlite3conn := []*sqlite3.SQLiteConn{}
- sql.Register("sqlite3_with_hook_example",
- &sqlite3.SQLiteDriver{
- ConnectHook: func(conn *sqlite3.SQLiteConn) error {
- sqlite3conn = append(sqlite3conn, conn)
- return nil
- },
- })
- os.Remove("./foo.db")
- os.Remove("./bar.db")
-
- destDb, err := sql.Open("sqlite3_with_hook_example", "./foo.db")
- if err != nil {
- log.Fatal(err)
- }
- defer destDb.Close()
- destDb.Ping()
-
- _, err = destDb.Exec("create table foo(id int, value text)")
- if err != nil {
- log.Fatal(err)
- }
- _, err = destDb.Exec("insert into foo values(1, 'foo')")
- if err != nil {
- log.Fatal(err)
- }
- _, err = destDb.Exec("insert into foo values(2, 'bar')")
- if err != nil {
- log.Fatal(err)
- }
- _, err = destDb.Query("select * from foo")
- if err != nil {
- log.Fatal(err)
- }
- srcDb, err := sql.Open("sqlite3_with_hook_example", "./bar.db")
- if err != nil {
- log.Fatal(err)
- }
- defer srcDb.Close()
- srcDb.Ping()
-
- bk, err := sqlite3conn[1].Backup("main", sqlite3conn[0], "main")
- if err != nil {
- log.Fatal(err)
- }
-
- _, err = bk.Step(-1)
- if err != nil {
- log.Fatal(err)
- }
- _, err = destDb.Query("select * from foo")
- if err != nil {
- log.Fatal(err)
- }
- _, err = destDb.Exec("insert into foo values(3, 'bar')")
- if err != nil {
- log.Fatal(err)
- }
-
- bk.Finish()
-}
diff --git a/vendor/src/github.com/mattn/go-sqlite3/_example/mod_regexp/Makefile b/vendor/src/github.com/mattn/go-sqlite3/_example/mod_regexp/Makefile
deleted file mode 100644
index 97b1e0f..0000000
--- a/vendor/src/github.com/mattn/go-sqlite3/_example/mod_regexp/Makefile
+++ /dev/null
@@ -1,22 +0,0 @@
-ifeq ($(OS),Windows_NT)
-EXE=extension.exe
-EXT=sqlite3_mod_regexp.dll
-RM=cmd /c del
-LDFLAG=
-else
-EXE=extension
-EXT=sqlite3_mod_regexp.so
-RM=rm
-LDFLAG=-fPIC
-endif
-
-all : $(EXE) $(EXT)
-
-$(EXE) : extension.go
- go build $<
-
-$(EXT) : sqlite3_mod_regexp.c
- gcc $(LDFLAG) -shared -o $@ $< -lsqlite3 -lpcre
-
-clean :
- @-$(RM) $(EXE) $(EXT)
diff --git a/vendor/src/github.com/mattn/go-sqlite3/_example/mod_regexp/extension.go b/vendor/src/github.com/mattn/go-sqlite3/_example/mod_regexp/extension.go
deleted file mode 100644
index 61ceb55..0000000
--- a/vendor/src/github.com/mattn/go-sqlite3/_example/mod_regexp/extension.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package main
-
-import (
- "database/sql"
- "fmt"
- "github.com/mattn/go-sqlite3"
- "log"
-)
-
-func main() {
- sql.Register("sqlite3_with_extensions",
- &sqlite3.SQLiteDriver{
- Extensions: []string{
- "sqlite3_mod_regexp",
- },
- })
-
- db, err := sql.Open("sqlite3_with_extensions", ":memory:")
- if err != nil {
- log.Fatal(err)
- }
- defer db.Close()
-
- // Force db to make a new connection in pool
- // by putting the original in a transaction
- tx, err := db.Begin()
- if err != nil {
- log.Fatal(err)
- }
- defer tx.Commit()
-
- // New connection works (hopefully!)
- rows, err := db.Query("select 'hello world' where 'hello world' regexp '^hello.*d$'")
- if err != nil {
- log.Fatal(err)
- }
- defer rows.Close()
- for rows.Next() {
- var helloworld string
- rows.Scan(&helloworld)
- fmt.Println(helloworld)
- }
-}
diff --git a/vendor/src/github.com/mattn/go-sqlite3/_example/mod_regexp/sqlite3_mod_regexp.c b/vendor/src/github.com/mattn/go-sqlite3/_example/mod_regexp/sqlite3_mod_regexp.c
deleted file mode 100644
index 277764d..0000000
--- a/vendor/src/github.com/mattn/go-sqlite3/_example/mod_regexp/sqlite3_mod_regexp.c
+++ /dev/null
@@ -1,31 +0,0 @@
-#include
-#include
-#include
-#include
-
-SQLITE_EXTENSION_INIT1
-static void regexp_func(sqlite3_context *context, int argc, sqlite3_value **argv) {
- if (argc >= 2) {
- const char *target = (const char *)sqlite3_value_text(argv[1]);
- const char *pattern = (const char *)sqlite3_value_text(argv[0]);
- const char* errstr = NULL;
- int erroff = 0;
- int vec[500];
- int n, rc;
- pcre* re = pcre_compile(pattern, 0, &errstr, &erroff, NULL);
- rc = pcre_exec(re, NULL, target, strlen(target), 0, 0, vec, 500);
- if (rc <= 0) {
- sqlite3_result_error(context, errstr, 0);
- return;
- }
- sqlite3_result_int(context, 1);
- }
-}
-
-#ifdef _WIN32
-__declspec(dllexport)
-#endif
-int sqlite3_extension_init(sqlite3 *db, char **errmsg, const sqlite3_api_routines *api) {
- SQLITE_EXTENSION_INIT2(api);
- return sqlite3_create_function(db, "regexp", 2, SQLITE_UTF8, (void*)db, regexp_func, NULL, NULL);
-}
diff --git a/vendor/src/github.com/mattn/go-sqlite3/_example/mod_vtable/Makefile b/vendor/src/github.com/mattn/go-sqlite3/_example/mod_vtable/Makefile
deleted file mode 100644
index cdd4853..0000000
--- a/vendor/src/github.com/mattn/go-sqlite3/_example/mod_vtable/Makefile
+++ /dev/null
@@ -1,24 +0,0 @@
-ifeq ($(OS),Windows_NT)
-EXE=extension.exe
-EXT=sqlite3_mod_vtable.dll
-RM=cmd /c del
-LIBCURL=-lcurldll
-LDFLAG=
-else
-EXE=extension
-EXT=sqlite3_mod_vtable.so
-RM=rm
-LDFLAG=-fPIC
-LIBCURL=-lcurl
-endif
-
-all : $(EXE) $(EXT)
-
-$(EXE) : extension.go
- go build $<
-
-$(EXT) : sqlite3_mod_vtable.cc
- g++ $(LDFLAG) -shared -o $@ $< -lsqlite3 $(LIBCURL)
-
-clean :
- @-$(RM) $(EXE) $(EXT)
diff --git a/vendor/src/github.com/mattn/go-sqlite3/_example/mod_vtable/extension.go b/vendor/src/github.com/mattn/go-sqlite3/_example/mod_vtable/extension.go
deleted file mode 100644
index 69ae2c7..0000000
--- a/vendor/src/github.com/mattn/go-sqlite3/_example/mod_vtable/extension.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package main
-
-import (
- "database/sql"
- "fmt"
- "github.com/mattn/go-sqlite3"
- "log"
-)
-
-func main() {
- sql.Register("sqlite3_with_extensions",
- &sqlite3.SQLiteDriver{
- Extensions: []string{
- "sqlite3_mod_vtable",
- },
- })
-
- db, err := sql.Open("sqlite3_with_extensions", ":memory:")
- if err != nil {
- log.Fatal(err)
- }
- defer db.Close()
-
- db.Exec("create virtual table repo using github(id, full_name, description, html_url)")
-
- rows, err := db.Query("select id, full_name, description, html_url from repo")
- if err != nil {
- log.Fatal(err)
- }
- defer rows.Close()
- for rows.Next() {
- var id, full_name, description, html_url string
- rows.Scan(&id, &full_name, &description, &html_url)
- fmt.Printf("%s: %s\n\t%s\n\t%s\n\n", id, full_name, description, html_url)
- }
-}
diff --git a/vendor/src/github.com/mattn/go-sqlite3/_example/mod_vtable/picojson.h b/vendor/src/github.com/mattn/go-sqlite3/_example/mod_vtable/picojson.h
deleted file mode 100644
index 2142647..0000000
--- a/vendor/src/github.com/mattn/go-sqlite3/_example/mod_vtable/picojson.h
+++ /dev/null
@@ -1,1040 +0,0 @@
-/*
- * Copyright 2009-2010 Cybozu Labs, Inc.
- * Copyright 2011 Kazuho Oku
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY CYBOZU LABS, INC. ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
- * EVENT SHALL CYBOZU LABS, INC. OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
- * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * The views and conclusions contained in the software and documentation are
- * those of the authors and should not be interpreted as representing official
- * policies, either expressed or implied, of Cybozu Labs, Inc.
- *
- */
-#ifndef picojson_h
-#define picojson_h
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-#ifdef _MSC_VER
- #define SNPRINTF _snprintf_s
- #pragma warning(push)
- #pragma warning(disable : 4244) // conversion from int to char
-#else
- #define SNPRINTF snprintf
-#endif
-
-namespace picojson {
-
- enum {
- null_type,
- boolean_type,
- number_type,
- string_type,
- array_type,
- object_type
- };
-
- struct null {};
-
- class value {
- public:
- typedef std::vector array;
- typedef std::map object;
- union _storage {
- bool boolean_;
- double number_;
- std::string* string_;
- array* array_;
- object* object_;
- };
- protected:
- int type_;
- _storage u_;
- public:
- value();
- value(int type, bool);
- explicit value(bool b);
- explicit value(double n);
- explicit value(const std::string& s);
- explicit value(const array& a);
- explicit value(const object& o);
- explicit value(const char* s);
- value(const char* s, size_t len);
- ~value();
- value(const value& x);
- value& operator=(const value& x);
- void swap(value& x);
- template bool is() const;
- template const T& get() const;
- template T& get();
- bool evaluate_as_boolean() const;
- const value& get(size_t idx) const;
- const value& get(const std::string& key) const;
- bool contains(size_t idx) const;
- bool contains(const std::string& key) const;
- std::string to_str() const;
- template void serialize(Iter os) const;
- std::string serialize() const;
- private:
- template value(const T*); // intentionally defined to block implicit conversion of pointer to bool
- };
-
- typedef value::array array;
- typedef value::object object;
-
- inline value::value() : type_(null_type) {}
-
- inline value::value(int type, bool) : type_(type) {
- switch (type) {
-#define INIT(p, v) case p##type: u_.p = v; break
- INIT(boolean_, false);
- INIT(number_, 0.0);
- INIT(string_, new std::string());
- INIT(array_, new array());
- INIT(object_, new object());
-#undef INIT
- default: break;
- }
- }
-
- inline value::value(bool b) : type_(boolean_type) {
- u_.boolean_ = b;
- }
-
- inline value::value(double n) : type_(number_type) {
- u_.number_ = n;
- }
-
- inline value::value(const std::string& s) : type_(string_type) {
- u_.string_ = new std::string(s);
- }
-
- inline value::value(const array& a) : type_(array_type) {
- u_.array_ = new array(a);
- }
-
- inline value::value(const object& o) : type_(object_type) {
- u_.object_ = new object(o);
- }
-
- inline value::value(const char* s) : type_(string_type) {
- u_.string_ = new std::string(s);
- }
-
- inline value::value(const char* s, size_t len) : type_(string_type) {
- u_.string_ = new std::string(s, len);
- }
-
- inline value::~value() {
- switch (type_) {
-#define DEINIT(p) case p##type: delete u_.p; break
- DEINIT(string_);
- DEINIT(array_);
- DEINIT(object_);
-#undef DEINIT
- default: break;
- }
- }
-
- inline value::value(const value& x) : type_(x.type_) {
- switch (type_) {
-#define INIT(p, v) case p##type: u_.p = v; break
- INIT(string_, new std::string(*x.u_.string_));
- INIT(array_, new array(*x.u_.array_));
- INIT(object_, new object(*x.u_.object_));
-#undef INIT
- default:
- u_ = x.u_;
- break;
- }
- }
-
- inline value& value::operator=(const value& x) {
- if (this != &x) {
- this->~value();
- new (this) value(x);
- }
- return *this;
- }
-
- inline void value::swap(value& x) {
- std::swap(type_, x.type_);
- std::swap(u_, x.u_);
- }
-
-#define IS(ctype, jtype) \
- template <> inline bool value::is() const { \
- return type_ == jtype##_type; \
- }
- IS(null, null)
- IS(bool, boolean)
- IS(int, number)
- IS(double, number)
- IS(std::string, string)
- IS(array, array)
- IS(object, object)
-#undef IS
-
-#define GET(ctype, var) \
- template <> inline const ctype& value::get() const { \
- assert("type mismatch! call vis() before get()" \
- && is()); \
- return var; \
- } \
- template <> inline ctype& value::get() { \
- assert("type mismatch! call is() before get()" \
- && is()); \
- return var; \
- }
- GET(bool, u_.boolean_)
- GET(double, u_.number_)
- GET(std::string, *u_.string_)
- GET(array, *u_.array_)
- GET(object, *u_.object_)
-#undef GET
-
- inline bool value::evaluate_as_boolean() const {
- switch (type_) {
- case null_type:
- return false;
- case boolean_type:
- return u_.boolean_;
- case number_type:
- return u_.number_ != 0;
- case string_type:
- return ! u_.string_->empty();
- default:
- return true;
- }
- }
-
- inline const value& value::get(size_t idx) const {
- static value s_null;
- assert(is());
- return idx < u_.array_->size() ? (*u_.array_)[idx] : s_null;
- }
-
- inline const value& value::get(const std::string& key) const {
- static value s_null;
- assert(is());
- object::const_iterator i = u_.object_->find(key);
- return i != u_.object_->end() ? i->second : s_null;
- }
-
- inline bool value::contains(size_t idx) const {
- assert(is());
- return idx < u_.array_->size();
- }
-
- inline bool value::contains(const std::string& key) const {
- assert(is());
- object::const_iterator i = u_.object_->find(key);
- return i != u_.object_->end();
- }
-
- inline std::string value::to_str() const {
- switch (type_) {
- case null_type: return "null";
- case boolean_type: return u_.boolean_ ? "true" : "false";
- case number_type: {
- char buf[256];
- double tmp;
- SNPRINTF(buf, sizeof(buf), fabs(u_.number_) < (1ULL << 53) && modf(u_.number_, &tmp) == 0 ? "%.f" : "%.17g", u_.number_);
- return buf;
- }
- case string_type: return *u_.string_;
- case array_type: return "array";
- case object_type: return "object";
- default: assert(0);
-#ifdef _MSC_VER
- __assume(0);
-#endif
- }
- return std::string();
- }
-
- template void copy(const std::string& s, Iter oi) {
- std::copy(s.begin(), s.end(), oi);
- }
-
- template void serialize_str(const std::string& s, Iter oi) {
- *oi++ = '"';
- for (std::string::const_iterator i = s.begin(); i != s.end(); ++i) {
- switch (*i) {
-#define MAP(val, sym) case val: copy(sym, oi); break
- MAP('"', "\\\"");
- MAP('\\', "\\\\");
- MAP('/', "\\/");
- MAP('\b', "\\b");
- MAP('\f', "\\f");
- MAP('\n', "\\n");
- MAP('\r', "\\r");
- MAP('\t', "\\t");
-#undef MAP
- default:
- if ((unsigned char)*i < 0x20 || *i == 0x7f) {
- char buf[7];
- SNPRINTF(buf, sizeof(buf), "\\u%04x", *i & 0xff);
- copy(buf, buf + 6, oi);
- } else {
- *oi++ = *i;
- }
- break;
- }
- }
- *oi++ = '"';
- }
-
- template void value::serialize(Iter oi) const {
- switch (type_) {
- case string_type:
- serialize_str(*u_.string_, oi);
- break;
- case array_type: {
- *oi++ = '[';
- for (array::const_iterator i = u_.array_->begin();
- i != u_.array_->end();
- ++i) {
- if (i != u_.array_->begin()) {
- *oi++ = ',';
- }
- i->serialize(oi);
- }
- *oi++ = ']';
- break;
- }
- case object_type: {
- *oi++ = '{';
- for (object::const_iterator i = u_.object_->begin();
- i != u_.object_->end();
- ++i) {
- if (i != u_.object_->begin()) {
- *oi++ = ',';
- }
- serialize_str(i->first, oi);
- *oi++ = ':';
- i->second.serialize(oi);
- }
- *oi++ = '}';
- break;
- }
- default:
- copy(to_str(), oi);
- break;
- }
- }
-
- inline std::string value::serialize() const {
- std::string s;
- serialize(std::back_inserter(s));
- return s;
- }
-
- template class input {
- protected:
- Iter cur_, end_;
- int last_ch_;
- bool ungot_;
- int line_;
- public:
- input(const Iter& first, const Iter& last) : cur_(first), end_(last), last_ch_(-1), ungot_(false), line_(1) {}
- int getc() {
- if (ungot_) {
- ungot_ = false;
- return last_ch_;
- }
- if (cur_ == end_) {
- last_ch_ = -1;
- return -1;
- }
- if (last_ch_ == '\n') {
- line_++;
- }
- last_ch_ = *cur_++ & 0xff;
- return last_ch_;
- }
- void ungetc() {
- if (last_ch_ != -1) {
- assert(! ungot_);
- ungot_ = true;
- }
- }
- Iter cur() const { return cur_; }
- int line() const { return line_; }
- void skip_ws() {
- while (1) {
- int ch = getc();
- if (! (ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r')) {
- ungetc();
- break;
- }
- }
- }
- bool expect(int expect) {
- skip_ws();
- if (getc() != expect) {
- ungetc();
- return false;
- }
- return true;
- }
- bool match(const std::string& pattern) {
- for (std::string::const_iterator pi(pattern.begin());
- pi != pattern.end();
- ++pi) {
- if (getc() != *pi) {
- ungetc();
- return false;
- }
- }
- return true;
- }
- };
-
- template inline int _parse_quadhex(input &in) {
- int uni_ch = 0, hex;
- for (int i = 0; i < 4; i++) {
- if ((hex = in.getc()) == -1) {
- return -1;
- }
- if ('0' <= hex && hex <= '9') {
- hex -= '0';
- } else if ('A' <= hex && hex <= 'F') {
- hex -= 'A' - 0xa;
- } else if ('a' <= hex && hex <= 'f') {
- hex -= 'a' - 0xa;
- } else {
- in.ungetc();
- return -1;
- }
- uni_ch = uni_ch * 16 + hex;
- }
- return uni_ch;
- }
-
- template inline bool _parse_codepoint(String& out, input& in) {
- int uni_ch;
- if ((uni_ch = _parse_quadhex(in)) == -1) {
- return false;
- }
- if (0xd800 <= uni_ch && uni_ch <= 0xdfff) {
- if (0xdc00 <= uni_ch) {
- // a second 16-bit of a surrogate pair appeared
- return false;
- }
- // first 16-bit of surrogate pair, get the next one
- if (in.getc() != '\\' || in.getc() != 'u') {
- in.ungetc();
- return false;
- }
- int second = _parse_quadhex(in);
- if (! (0xdc00 <= second && second <= 0xdfff)) {
- return false;
- }
- uni_ch = ((uni_ch - 0xd800) << 10) | ((second - 0xdc00) & 0x3ff);
- uni_ch += 0x10000;
- }
- if (uni_ch < 0x80) {
- out.push_back(uni_ch);
- } else {
- if (uni_ch < 0x800) {
- out.push_back(0xc0 | (uni_ch >> 6));
- } else {
- if (uni_ch < 0x10000) {
- out.push_back(0xe0 | (uni_ch >> 12));
- } else {
- out.push_back(0xf0 | (uni_ch >> 18));
- out.push_back(0x80 | ((uni_ch >> 12) & 0x3f));
- }
- out.push_back(0x80 | ((uni_ch >> 6) & 0x3f));
- }
- out.push_back(0x80 | (uni_ch & 0x3f));
- }
- return true;
- }
-
- template inline bool _parse_string(String& out, input& in) {
- while (1) {
- int ch = in.getc();
- if (ch < ' ') {
- in.ungetc();
- return false;
- } else if (ch == '"') {
- return true;
- } else if (ch == '\\') {
- if ((ch = in.getc()) == -1) {
- return false;
- }
- switch (ch) {
-#define MAP(sym, val) case sym: out.push_back(val); break
- MAP('"', '\"');
- MAP('\\', '\\');
- MAP('/', '/');
- MAP('b', '\b');
- MAP('f', '\f');
- MAP('n', '\n');
- MAP('r', '\r');
- MAP('t', '\t');
-#undef MAP
- case 'u':
- if (! _parse_codepoint(out, in)) {
- return false;
- }
- break;
- default:
- return false;
- }
- } else {
- out.push_back(ch);
- }
- }
- return false;
- }
-
- template inline bool _parse_array(Context& ctx, input& in) {
- if (! ctx.parse_array_start()) {
- return false;
- }
- size_t idx = 0;
- if (in.expect(']')) {
- return ctx.parse_array_stop(idx);
- }
- do {
- if (! ctx.parse_array_item(in, idx)) {
- return false;
- }
- idx++;
- } while (in.expect(','));
- return in.expect(']') && ctx.parse_array_stop(idx);
- }
-
- template inline bool _parse_object(Context& ctx, input& in) {
- if (! ctx.parse_object_start()) {
- return false;
- }
- if (in.expect('}')) {
- return true;
- }
- do {
- std::string key;
- if (! in.expect('"')
- || ! _parse_string(key, in)
- || ! in.expect(':')) {
- return false;
- }
- if (! ctx.parse_object_item(in, key)) {
- return false;
- }
- } while (in.expect(','));
- return in.expect('}');
- }
-
- template inline bool _parse_number(double& out, input& in) {
- std::string num_str;
- while (1) {
- int ch = in.getc();
- if (('0' <= ch && ch <= '9') || ch == '+' || ch == '-' || ch == '.'
- || ch == 'e' || ch == 'E') {
- num_str.push_back(ch);
- } else {
- in.ungetc();
- break;
- }
- }
- char* endp;
- out = strtod(num_str.c_str(), &endp);
- return endp == num_str.c_str() + num_str.size();
- }
-
- template inline bool _parse(Context& ctx, input& in) {
- in.skip_ws();
- int ch = in.getc();
- switch (ch) {
-#define IS(ch, text, op) case ch: \
- if (in.match(text) && op) { \
- return true; \
- } else { \
- return false; \
- }
- IS('n', "ull", ctx.set_null());
- IS('f', "alse", ctx.set_bool(false));
- IS('t', "rue", ctx.set_bool(true));
-#undef IS
- case '"':
- return ctx.parse_string(in);
- case '[':
- return _parse_array(ctx, in);
- case '{':
- return _parse_object(ctx, in);
- default:
- if (('0' <= ch && ch <= '9') || ch == '-') {
- in.ungetc();
- double f;
- if (_parse_number(f, in)) {
- ctx.set_number(f);
- return true;
- } else {
- return false;
- }
- }
- break;
- }
- in.ungetc();
- return false;
- }
-
- class deny_parse_context {
- public:
- bool set_null() { return false; }
- bool set_bool(bool) { return false; }
- bool set_number(double) { return false; }
- template bool parse_string(input&) { return false; }
- bool parse_array_start() { return false; }
- template bool parse_array_item(input&, size_t) {
- return false;
- }
- bool parse_array_stop(size_t) { return false; }
- bool parse_object_start() { return false; }
- template bool parse_object_item(input&, const std::string&) {
- return false;
- }
- };
-
- class default_parse_context {
- protected:
- value* out_;
- public:
- default_parse_context(value* out) : out_(out) {}
- bool set_null() {
- *out_ = value();
- return true;
- }
- bool set_bool(bool b) {
- *out_ = value(b);
- return true;
- }
- bool set_number(double f) {
- *out_ = value(f);
- return true;
- }
- template bool parse_string(input& in) {
- *out_ = value(string_type, false);
- return _parse_string(out_->get(), in);
- }
- bool parse_array_start() {
- *out_ = value(array_type, false);
- return true;
- }
- template bool parse_array_item(input& in, size_t) {
- array& a = out_->get();
- a.push_back(value());
- default_parse_context ctx(&a.back());
- return _parse(ctx, in);
- }
- bool parse_array_stop(size_t) { return true; }
- bool parse_object_start() {
- *out_ = value(object_type, false);
- return true;
- }
- template bool parse_object_item(input& in, const std::string& key) {
- object& o = out_->get();
- default_parse_context ctx(&o[key]);
- return _parse(ctx, in);
- }
- private:
- default_parse_context(const default_parse_context&);
- default_parse_context& operator=(const default_parse_context&);
- };
-
- class null_parse_context {
- public:
- struct dummy_str {
- void push_back(int) {}
- };
- public:
- null_parse_context() {}
- bool set_null() { return true; }
- bool set_bool(bool) { return true; }
- bool set_number(double) { return true; }
- template bool parse_string(input& in) {
- dummy_str s;
- return _parse_string(s, in);
- }
- bool parse_array_start() { return true; }
- template bool parse_array_item(input& in, size_t) {
- return _parse(*this, in);
- }
- bool parse_array_stop(size_t) { return true; }
- bool parse_object_start() { return true; }
- template bool parse_object_item(input& in, const std::string&) {
- return _parse(*this, in);
- }
- private:
- null_parse_context(const null_parse_context&);
- null_parse_context& operator=(const null_parse_context&);
- };
-
- // obsolete, use the version below
- template inline std::string parse(value& out, Iter& pos, const Iter& last) {
- std::string err;
- pos = parse(out, pos, last, &err);
- return err;
- }
-
- template inline Iter _parse(Context& ctx, const Iter& first, const Iter& last, std::string* err) {
- input in(first, last);
- if (! _parse(ctx, in) && err != NULL) {
- char buf[64];
- SNPRINTF(buf, sizeof(buf), "syntax error at line %d near: ", in.line());
- *err = buf;
- while (1) {
- int ch = in.getc();
- if (ch == -1 || ch == '\n') {
- break;
- } else if (ch >= ' ') {
- err->push_back(ch);
- }
- }
- }
- return in.cur();
- }
-
- template inline Iter parse(value& out, const Iter& first, const Iter& last, std::string* err) {
- default_parse_context ctx(&out);
- return _parse(ctx, first, last, err);
- }
-
- inline std::string parse(value& out, std::istream& is) {
- std::string err;
- parse(out, std::istreambuf_iterator(is.rdbuf()),
- std::istreambuf_iterator(), &err);
- return err;
- }
-
- template struct last_error_t {
- static std::string s;
- };
- template std::string last_error_t::s;
-
- inline void set_last_error(const std::string& s) {
- last_error_t::s = s;
- }
-
- inline const std::string& get_last_error() {
- return last_error_t::s;
- }
-
- inline bool operator==(const value& x, const value& y) {
- if (x.is())
- return y.is();
-#define PICOJSON_CMP(type) \
- if (x.is()) \
- return y.is() && x.get() == y.get()
- PICOJSON_CMP(bool);
- PICOJSON_CMP(double);
- PICOJSON_CMP(std::string);
- PICOJSON_CMP(array);
- PICOJSON_CMP(object);
-#undef PICOJSON_CMP
- assert(0);
-#ifdef _MSC_VER
- __assume(0);
-#endif
- return false;
- }
-
- inline bool operator!=(const value& x, const value& y) {
- return ! (x == y);
- }
-}
-
-namespace std {
- template<> inline void swap(picojson::value& x, picojson::value& y)
- {
- x.swap(y);
- }
-}
-
-inline std::istream& operator>>(std::istream& is, picojson::value& x)
-{
- picojson::set_last_error(std::string());
- std::string err = picojson::parse(x, is);
- if (! err.empty()) {
- picojson::set_last_error(err);
- is.setstate(std::ios::failbit);
- }
- return is;
-}
-
-inline std::ostream& operator<<(std::ostream& os, const picojson::value& x)
-{
- x.serialize(std::ostream_iterator(os));
- return os;
-}
-#ifdef _MSC_VER
- #pragma warning(pop)
-#endif
-
-#endif
-#ifdef TEST_PICOJSON
-#ifdef _MSC_VER
- #pragma warning(disable : 4127) // conditional expression is constant
-#endif
-
-using namespace std;
-
-static void plan(int num)
-{
- printf("1..%d\n", num);
-}
-
-static bool success = true;
-
-static void ok(bool b, const char* name = "")
-{
- static int n = 1;
- if (! b)
- success = false;
- printf("%s %d - %s\n", b ? "ok" : "ng", n++, name);
-}
-
-template void is(const T& x, const T& y, const char* name = "")
-{
- if (x == y) {
- ok(true, name);
- } else {
- ok(false, name);
- }
-}
-
-#include
-#include
-#include
-#include
-
-int main(void)
-{
- plan(85);
-
- // constructors
-#define TEST(expr, expected) \
- is(picojson::value expr .serialize(), string(expected), "picojson::value" #expr)
-
- TEST( (true), "true");
- TEST( (false), "false");
- TEST( (42.0), "42");
- TEST( (string("hello")), "\"hello\"");
- TEST( ("hello"), "\"hello\"");
- TEST( ("hello", 4), "\"hell\"");
-
- {
- double a = 1;
- for (int i = 0; i < 1024; i++) {
- picojson::value vi(a);
- std::stringstream ss;
- ss << vi;
- picojson::value vo;
- ss >> vo;
- double b = vo.get();
- if ((i < 53 && a != b) || fabs(a - b) / b > 1e-8) {
- printf("ng i=%d a=%.18e b=%.18e\n", i, a, b);
- }
- a *= 2;
- }
- }
-
-#undef TEST
-
-#define TEST(in, type, cmp, serialize_test) { \
- picojson::value v; \
- const char* s = in; \
- string err = picojson::parse(v, s, s + strlen(s)); \
- ok(err.empty(), in " no error"); \
- ok(v.is(), in " check type"); \
- is(v.get(), cmp, in " correct output"); \
- is(*s, '\0', in " read to eof"); \
- if (serialize_test) { \
- is(v.serialize(), string(in), in " serialize"); \
- } \
- }
- TEST("false", bool, false, true);
- TEST("true", bool, true, true);
- TEST("90.5", double, 90.5, false);
- TEST("1.7976931348623157e+308", double, DBL_MAX, false);
- TEST("\"hello\"", string, string("hello"), true);
- TEST("\"\\\"\\\\\\/\\b\\f\\n\\r\\t\"", string, string("\"\\/\b\f\n\r\t"),
- true);
- TEST("\"\\u0061\\u30af\\u30ea\\u30b9\"", string,
- string("a\xe3\x82\xaf\xe3\x83\xaa\xe3\x82\xb9"), false);
- TEST("\"\\ud840\\udc0b\"", string, string("\xf0\xa0\x80\x8b"), false);
-#undef TEST
-
-#define TEST(type, expr) { \
- picojson::value v; \
- const char *s = expr; \
- string err = picojson::parse(v, s, s + strlen(s)); \
- ok(err.empty(), "empty " #type " no error"); \
- ok(v.is(), "empty " #type " check type"); \
- ok(v.get().empty(), "check " #type " array size"); \
- }
- TEST(array, "[]");
- TEST(object, "{}");
-#undef TEST
-
- {
- picojson::value v;
- const char *s = "[1,true,\"hello\"]";
- string err = picojson::parse(v, s, s + strlen(s));
- ok(err.empty(), "array no error");
- ok(v.is(), "array check type");
- is(v.get().size(), size_t(3), "check array size");
- ok(v.contains(0), "check contains array[0]");
- ok(v.get(0).is(), "check array[0] type");
- is(v.get(0).get(), 1.0, "check array[0] value");
- ok(v.contains(1), "check contains array[1]");
- ok(v.get(1).is(), "check array[1] type");
- ok(v.get(1).get(), "check array[1] value");
- ok(v.contains(2), "check contains array[2]");
- ok(v.get(2).is(), "check array[2] type");
- is(v.get(2).get(), string("hello"), "check array[2] value");
- ok(!v.contains(3), "check not contains array[3]");
- }
-
- {
- picojson::value v;
- const char *s = "{ \"a\": true }";
- string err = picojson::parse(v, s, s + strlen(s));
- ok(err.empty(), "object no error");
- ok(v.is(), "object check type");
- is(v.get().size(), size_t(1), "check object size");
- ok(v.contains("a"), "check contains property");
- ok(v.get("a").is(), "check bool property exists");
- is(v.get("a").get(), true, "check bool property value");
- is(v.serialize(), string("{\"a\":true}"), "serialize object");
- ok(!v.contains("z"), "check not contains property");
- }
-
-#define TEST(json, msg) do { \
- picojson::value v; \
- const char *s = json; \
- string err = picojson::parse(v, s, s + strlen(s)); \
- is(err, string("syntax error at line " msg), msg); \
- } while (0)
- TEST("falsoa", "1 near: oa");
- TEST("{]", "1 near: ]");
- TEST("\n\bbell", "2 near: bell");
- TEST("\"abc\nd\"", "1 near: ");
-#undef TEST
-
- {
- picojson::value v1, v2;
- const char *s;
- string err;
- s = "{ \"b\": true, \"a\": [1,2,\"three\"], \"d\": 2 }";
- err = picojson::parse(v1, s, s + strlen(s));
- s = "{ \"d\": 2.0, \"b\": true, \"a\": [1,2,\"three\"] }";
- err = picojson::parse(v2, s, s + strlen(s));
- ok((v1 == v2), "check == operator in deep comparison");
- }
-
- {
- picojson::value v1, v2;
- const char *s;
- string err;
- s = "{ \"b\": true, \"a\": [1,2,\"three\"], \"d\": 2 }";
- err = picojson::parse(v1, s, s + strlen(s));
- s = "{ \"d\": 2.0, \"a\": [1,\"three\"], \"b\": true }";
- err = picojson::parse(v2, s, s + strlen(s));
- ok((v1 != v2), "check != operator for array in deep comparison");
- }
-
- {
- picojson::value v1, v2;
- const char *s;
- string err;
- s = "{ \"b\": true, \"a\": [1,2,\"three\"], \"d\": 2 }";
- err = picojson::parse(v1, s, s + strlen(s));
- s = "{ \"d\": 2.0, \"a\": [1,2,\"three\"], \"b\": false }";
- err = picojson::parse(v2, s, s + strlen(s));
- ok((v1 != v2), "check != operator for object in deep comparison");
- }
-
- {
- picojson::value v1, v2;
- const char *s;
- string err;
- s = "{ \"b\": true, \"a\": [1,2,\"three\"], \"d\": 2 }";
- err = picojson::parse(v1, s, s + strlen(s));
- picojson::object& o = v1.get();
- o.erase("b");
- picojson::array& a = o["a"].get();
- picojson::array::iterator i;
- i = std::remove(a.begin(), a.end(), picojson::value(std::string("three")));
- a.erase(i, a.end());
- s = "{ \"a\": [1,2], \"d\": 2 }";
- err = picojson::parse(v2, s, s + strlen(s));
- ok((v1 == v2), "check erase()");
- }
-
- ok(picojson::value(3.0).serialize() == "3",
- "integral number should be serialized as a integer");
-
- {
- const char* s = "{ \"a\": [1,2], \"d\": 2 }";
- picojson::null_parse_context ctx;
- string err;
- picojson::_parse(ctx, s, s + strlen(s), &err);
- ok(err.empty(), "null_parse_context");
- }
-
- {
- picojson::value v1, v2;
- v1 = picojson::value(true);
- swap(v1, v2);
- ok(v1.is(), "swap (null)");
- ok(v2.get() == true, "swap (bool)");
-
- v1 = picojson::value("a");
- v2 = picojson::value(1.0);
- swap(v1, v2);
- ok(v1.get() == 1.0, "swap (dobule)");
- ok(v2.get() == "a", "swap (string)");
-
- v1 = picojson::value(picojson::object());
- v2 = picojson::value(picojson::array());
- swap(v1, v2);
- ok(v1.is(), "swap (array)");
- ok(v2.is(), "swap (object)");
- }
-
- return success ? 0 : 1;
-}
-
-#endif
diff --git a/vendor/src/github.com/mattn/go-sqlite3/_example/mod_vtable/sqlite3_mod_vtable.cc b/vendor/src/github.com/mattn/go-sqlite3/_example/mod_vtable/sqlite3_mod_vtable.cc
deleted file mode 100644
index 5bd4e66..0000000
--- a/vendor/src/github.com/mattn/go-sqlite3/_example/mod_vtable/sqlite3_mod_vtable.cc
+++ /dev/null
@@ -1,238 +0,0 @@
-#include
-#include
-#include
-#include
-#include
-#include "picojson.h"
-
-#ifdef _WIN32
-# define EXPORT __declspec(dllexport)
-#else
-# define EXPORT
-#endif
-
-SQLITE_EXTENSION_INIT1;
-
-typedef struct {
- char* data; // response data from server
- size_t size; // response size of data
-} MEMFILE;
-
-MEMFILE*
-memfopen() {
- MEMFILE* mf = (MEMFILE*) malloc(sizeof(MEMFILE));
- if (mf) {
- mf->data = NULL;
- mf->size = 0;
- }
- return mf;
-}
-
-void
-memfclose(MEMFILE* mf) {
- if (mf->data) free(mf->data);
- free(mf);
-}
-
-size_t
-memfwrite(char* ptr, size_t size, size_t nmemb, void* stream) {
- MEMFILE* mf = (MEMFILE*) stream;
- int block = size * nmemb;
- if (!mf) return block; // through
- if (!mf->data)
- mf->data = (char*) malloc(block);
- else
- mf->data = (char*) realloc(mf->data, mf->size + block);
- if (mf->data) {
- memcpy(mf->data + mf->size, ptr, block);
- mf->size += block;
- }
- return block;
-}
-
-char*
-memfstrdup(MEMFILE* mf) {
- char* buf;
- if (mf->size == 0) return NULL;
- buf = (char*) malloc(mf->size + 1);
- memcpy(buf, mf->data, mf->size);
- buf[mf->size] = 0;
- return buf;
-}
-
-static int
-my_connect(sqlite3 *db, void *pAux, int argc, const char * const *argv, sqlite3_vtab **ppVTab, char **c) {
- std::stringstream ss;
- ss << "CREATE TABLE " << argv[0]
- << "(id int, full_name text, description text, html_url text)";
- int rc = sqlite3_declare_vtab(db, ss.str().c_str());
- *ppVTab = (sqlite3_vtab *) sqlite3_malloc(sizeof(sqlite3_vtab));
- memset(*ppVTab, 0, sizeof(sqlite3_vtab));
- return rc;
-}
-
-static int
-my_create(sqlite3 *db, void *pAux, int argc, const char * const * argv, sqlite3_vtab **ppVTab, char **c) {
- return my_connect(db, pAux, argc, argv, ppVTab, c);
-}
-
-static int my_disconnect(sqlite3_vtab *pVTab) {
- sqlite3_free(pVTab);
- return SQLITE_OK;
-}
-
-static int
-my_destroy(sqlite3_vtab *pVTab) {
- sqlite3_free(pVTab);
- return SQLITE_OK;
-}
-
-typedef struct {
- sqlite3_vtab_cursor base;
- int index;
- picojson::value* rows;
-} cursor;
-
-static int
-my_open(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor) {
- MEMFILE* mf;
- CURL* curl;
- char* json;
- CURLcode res = CURLE_OK;
- char error[CURL_ERROR_SIZE] = {0};
- char* cert_file = getenv("SSL_CERT_FILE");
-
- mf = memfopen();
- curl = curl_easy_init();
- curl_easy_setopt(curl, CURLOPT_SSL_VERIFYPEER, 1);
- curl_easy_setopt(curl, CURLOPT_SSL_VERIFYHOST, 2);
- curl_easy_setopt(curl, CURLOPT_USERAGENT, "curl/7.29.0");
- curl_easy_setopt(curl, CURLOPT_URL, "https://api.github.com/repositories");
- if (cert_file)
- curl_easy_setopt(curl, CURLOPT_CAINFO, cert_file);
- curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1);
- curl_easy_setopt(curl, CURLOPT_ERRORBUFFER, error);
- curl_easy_setopt(curl, CURLOPT_WRITEDATA, mf);
- curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, memfwrite);
- res = curl_easy_perform(curl);
- curl_easy_cleanup(curl);
- if (res != CURLE_OK) {
- std::cerr << error << std::endl;
- return SQLITE_FAIL;
- }
-
- picojson::value* v = new picojson::value;
- std::string err;
- picojson::parse(*v, mf->data, mf->data + mf->size, &err);
- memfclose(mf);
-
- if (!err.empty()) {
- delete v;
- std::cerr << err << std::endl;
- return SQLITE_FAIL;
- }
-
- cursor *c = (cursor *)sqlite3_malloc(sizeof(cursor));
- c->rows = v;
- c->index = 0;
- *ppCursor = &c->base;
- return SQLITE_OK;
-}
-
-static int
-my_close(cursor *c) {
- delete c->rows;
- sqlite3_free(c);
- return SQLITE_OK;
-}
-
-static int
-my_filter(cursor *c, int idxNum, const char *idxStr, int argc, sqlite3_value **argv) {
- c->index = 0;
- return SQLITE_OK;
-}
-
-static int
-my_next(cursor *c) {
- c->index++;
- return SQLITE_OK;
-}
-
-static int
-my_eof(cursor *c) {
- return c->index >= c->rows->get().size() ? 1 : 0;
-}
-
-static int
-my_column(cursor *c, sqlite3_context *ctxt, int i) {
- picojson::value v = c->rows->get()[c->index];
- picojson::object row = v.get();
- const char* p = NULL;
- switch (i) {
- case 0:
- p = row["id"].to_str().c_str();
- break;
- case 1:
- p = row["full_name"].to_str().c_str();
- break;
- case 2:
- p = row["description"].to_str().c_str();
- break;
- case 3:
- p = row["html_url"].to_str().c_str();
- break;
- }
- sqlite3_result_text(ctxt, strdup(p), strlen(p), free);
- return SQLITE_OK;
-}
-
-static int
-my_rowid(cursor *c, sqlite3_int64 *pRowid) {
- *pRowid = c->index;
- return SQLITE_OK;
-}
-
-static int
-my_bestindex(sqlite3_vtab *tab, sqlite3_index_info *pIdxInfo) {
- return SQLITE_OK;
-}
-
-static const sqlite3_module module = {
- 0,
- my_create,
- my_connect,
- my_bestindex,
- my_disconnect,
- my_destroy,
- my_open,
- (int (*)(sqlite3_vtab_cursor *)) my_close,
- (int (*)(sqlite3_vtab_cursor *, int, char const *, int, sqlite3_value **)) my_filter,
- (int (*)(sqlite3_vtab_cursor *)) my_next,
- (int (*)(sqlite3_vtab_cursor *)) my_eof,
- (int (*)(sqlite3_vtab_cursor *, sqlite3_context *, int)) my_column,
- (int (*)(sqlite3_vtab_cursor *, sqlite3_int64 *)) my_rowid,
- NULL, // my_update
- NULL, // my_begin
- NULL, // my_sync
- NULL, // my_commit
- NULL, // my_rollback
- NULL, // my_findfunction
- NULL, // my_rename
-};
-
-static void
-destructor(void *arg) {
- return;
-}
-
-
-extern "C" {
-
-EXPORT int
-sqlite3_extension_init(sqlite3 *db, char **errmsg, const sqlite3_api_routines *api) {
- SQLITE_EXTENSION_INIT2(api);
- sqlite3_create_module_v2(db, "github", &module, NULL, destructor);
- return 0;
-}
-
-}
diff --git a/vendor/src/github.com/mattn/go-sqlite3/_example/simple/simple.go b/vendor/src/github.com/mattn/go-sqlite3/_example/simple/simple.go
deleted file mode 100644
index 261ed4d..0000000
--- a/vendor/src/github.com/mattn/go-sqlite3/_example/simple/simple.go
+++ /dev/null
@@ -1,106 +0,0 @@
-package main
-
-import (
- "database/sql"
- "fmt"
- _ "github.com/mattn/go-sqlite3"
- "log"
- "os"
-)
-
-func main() {
- os.Remove("./foo.db")
-
- db, err := sql.Open("sqlite3", "./foo.db")
- if err != nil {
- log.Fatal(err)
- }
- defer db.Close()
-
- sqlStmt := `
- create table foo (id integer not null primary key, name text);
- delete from foo;
- `
- _, err = db.Exec(sqlStmt)
- if err != nil {
- log.Printf("%q: %s\n", err, sqlStmt)
- return
- }
-
- tx, err := db.Begin()
- if err != nil {
- log.Fatal(err)
- }
- stmt, err := tx.Prepare("insert into foo(id, name) values(?, ?)")
- if err != nil {
- log.Fatal(err)
- }
- defer stmt.Close()
- for i := 0; i < 100; i++ {
- _, err = stmt.Exec(i, fmt.Sprintf("ã“ã‚“ã«ã¡ã‚世界%03d", i))
- if err != nil {
- log.Fatal(err)
- }
- }
- tx.Commit()
-
- rows, err := db.Query("select id, name from foo")
- if err != nil {
- log.Fatal(err)
- }
- defer rows.Close()
- for rows.Next() {
- var id int
- var name string
- err = rows.Scan(&id, &name)
- if err != nil {
- log.Fatal(err)
- }
- fmt.Println(id, name)
- }
- err = rows.Err()
- if err != nil {
- log.Fatal(err)
- }
-
- stmt, err = db.Prepare("select name from foo where id = ?")
- if err != nil {
- log.Fatal(err)
- }
- defer stmt.Close()
- var name string
- err = stmt.QueryRow("3").Scan(&name)
- if err != nil {
- log.Fatal(err)
- }
- fmt.Println(name)
-
- _, err = db.Exec("delete from foo")
- if err != nil {
- log.Fatal(err)
- }
-
- _, err = db.Exec("insert into foo(id, name) values(1, 'foo'), (2, 'bar'), (3, 'baz')")
- if err != nil {
- log.Fatal(err)
- }
-
- rows, err = db.Query("select id, name from foo")
- if err != nil {
- log.Fatal(err)
- }
- defer rows.Close()
- for rows.Next() {
- var id int
- var name string
- err = rows.Scan(&id, &name)
- if err != nil {
- log.Fatal(err)
- }
- fmt.Println(id, name)
- }
- err = rows.Err()
- if err != nil {
- log.Fatal(err)
- }
-}
diff --git a/vendor/src/github.com/mattn/go-sqlite3/_example/trace/main.go b/vendor/src/github.com/mattn/go-sqlite3/_example/trace/main.go
deleted file mode 100644
index 6a2953f..0000000
--- a/vendor/src/github.com/mattn/go-sqlite3/_example/trace/main.go
+++ /dev/null
@@ -1,264 +0,0 @@
-package main
-
-import (
- "database/sql"
- "fmt"
- "log"
- "os"
-
- sqlite3 "github.com/gimpldo/go-sqlite3"
-)
-
-func traceCallback(info sqlite3.TraceInfo) int {
- // Not very readable but may be useful; uncomment next line in case of doubt:
- //fmt.Printf("Trace: %#v\n", info)
-
- var dbErrText string
- if info.DBError.Code != 0 || info.DBError.ExtendedCode != 0 {
- dbErrText = fmt.Sprintf("; DB error: %#v", info.DBError)
- } else {
- dbErrText = "."
- }
-
- // Show the Statement-or-Trigger text in curly braces ('{', '}')
- // since from the *paired* ASCII characters they are
- // the least used in SQL syntax, therefore better visual delimiters.
- // Maybe show 'ExpandedSQL' the same way as 'StmtOrTrigger'.
- //
- // A known use of curly braces (outside strings) is
- // for ODBC escape sequences. Not likely to appear here.
- //
- // Template languages, etc. don't matter, we should see their *result*
- // at *this* level.
- // Strange curly braces in SQL code that reached the database driver
- // suggest that there is a bug in the application.
- // The braces are likely to be either template syntax or
- // a programming language's string interpolation syntax.
-
- var expandedText string
- if info.ExpandedSQL != "" {
- if info.ExpandedSQL == info.StmtOrTrigger {
- expandedText = " = exp"
- } else {
- expandedText = fmt.Sprintf(" expanded {%q}", info.ExpandedSQL)
- }
- } else {
- expandedText = ""
- }
-
- // SQLite docs as of September 6, 2016: Tracing and Profiling Functions
- // https://www.sqlite.org/c3ref/profile.html
- //
- // The profile callback time is in units of nanoseconds, however
- // the current implementation is only capable of millisecond resolution
- // so the six least significant digits in the time are meaningless.
- // Future versions of SQLite might provide greater resolution on the profiler callback.
-
- var runTimeText string
- if info.RunTimeNanosec == 0 {
- if info.EventCode == sqlite3.TraceProfile {
- //runTimeText = "; no time" // seems confusing
- runTimeText = "; time 0" // no measurement unit
- } else {
- //runTimeText = "; no time" // seems useless and confusing
- }
- } else {
- const nanosPerMillisec = 1000000
- if info.RunTimeNanosec%nanosPerMillisec == 0 {
- runTimeText = fmt.Sprintf("; time %d ms", info.RunTimeNanosec/nanosPerMillisec)
- } else {
- // unexpected: better than millisecond resolution
- runTimeText = fmt.Sprintf("; time %d ns!!!", info.RunTimeNanosec)
- }
- }
-
- var modeText string
- if info.AutoCommit {
- modeText = "-AC-"
- } else {
- modeText = "+Tx+"
- }
-
- fmt.Printf("Trace: ev %d %s conn 0x%x, stmt 0x%x {%q}%s%s%s\n",
- info.EventCode, modeText, info.ConnHandle, info.StmtHandle,
- info.StmtOrTrigger, expandedText,
- runTimeText,
- dbErrText)
- return 0
-}
-
-func main() {
- eventMask := sqlite3.TraceStmt | sqlite3.TraceProfile | sqlite3.TraceRow | sqlite3.TraceClose
-
- sql.Register("sqlite3_tracing",
- &sqlite3.SQLiteDriver{
- ConnectHook: func(conn *sqlite3.SQLiteConn) error {
- err := conn.SetTrace(&sqlite3.TraceConfig{
- Callback: traceCallback,
- EventMask: uint(eventMask),
- WantExpandedSQL: true,
- })
- return err
- },
- })
-
- os.Exit(dbMain())
-}
-
-// Harder to do DB work in main().
-// It's better with a separate function because
-// 'defer' and 'os.Exit' don't go well together.
-//
-// DO NOT use 'log.Fatal...' below: remember that it's equivalent to
-// Print() followed by a call to os.Exit(1) --- and
-// we want to avoid Exit() so 'defer' can do cleanup.
-// Use 'log.Panic...' instead.
-
-func dbMain() int {
- db, err := sql.Open("sqlite3_tracing", ":memory:")
- if err != nil {
- fmt.Printf("Failed to open database: %#+v\n", err)
- return 1
- }
- defer db.Close()
-
- err = db.Ping()
- if err != nil {
- log.Panic(err)
- }
-
- dbSetup(db)
-
- dbDoInsert(db)
- dbDoInsertPrepared(db)
- dbDoSelect(db)
- dbDoSelectPrepared(db)
-
- return 0
-}
-
-// 'DDL' stands for "Data Definition Language":
-
-// Note: "INTEGER PRIMARY KEY NOT NULL AUTOINCREMENT" causes the error
-// 'near "AUTOINCREMENT": syntax error'; without "NOT NULL" it works.
-const tableDDL = `CREATE TABLE t1 (
- id INTEGER PRIMARY KEY AUTOINCREMENT,
- note VARCHAR NOT NULL
-)`
-
-// 'DML' stands for "Data Manipulation Language":
-
-const insertDML = "INSERT INTO t1 (note) VALUES (?)"
-const selectDML = "SELECT id, note FROM t1 WHERE note LIKE ?"
-
-const textPrefix = "bla-1234567890-"
-const noteTextPattern = "%Prep%"
-
-const nGenRows = 4 // Number of Rows to Generate (for *each* approach tested)
-
-func dbSetup(db *sql.DB) {
- var err error
-
- _, err = db.Exec("DROP TABLE IF EXISTS t1")
- if err != nil {
- log.Panic(err)
- }
- _, err = db.Exec(tableDDL)
- if err != nil {
- log.Panic(err)
- }
-}
-
-func dbDoInsert(db *sql.DB) {
- const Descr = "DB-Exec"
- for i := 0; i < nGenRows; i++ {
- result, err := db.Exec(insertDML, textPrefix+Descr)
- if err != nil {
- log.Panic(err)
- }
-
- resultDoCheck(result, Descr, i)
- }
-}
-
-func dbDoInsertPrepared(db *sql.DB) {
- const Descr = "DB-Prepare"
-
- stmt, err := db.Prepare(insertDML)
- if err != nil {
- log.Panic(err)
- }
- defer stmt.Close()
-
- for i := 0; i < nGenRows; i++ {
- result, err := stmt.Exec(textPrefix + Descr)
- if err != nil {
- log.Panic(err)
- }
-
- resultDoCheck(result, Descr, i)
- }
-}
-
-func resultDoCheck(result sql.Result, callerDescr string, callIndex int) {
- lastID, err := result.LastInsertId()
- if err != nil {
- log.Panic(err)
- }
- nAffected, err := result.RowsAffected()
- if err != nil {
- log.Panic(err)
- }
-
- log.Printf("Exec result for %s (%d): ID = %d, affected = %d\n", callerDescr, callIndex, lastID, nAffected)
-}
-
-func dbDoSelect(db *sql.DB) {
- const Descr = "DB-Query"
-
- rows, err := db.Query(selectDML, noteTextPattern)
- if err != nil {
- log.Panic(err)
- }
- defer rows.Close()
-
- rowsDoFetch(rows, Descr)
-}
-
-func dbDoSelectPrepared(db *sql.DB) {
- const Descr = "DB-Prepare"
-
- stmt, err := db.Prepare(selectDML)
- if err != nil {
- log.Panic(err)
- }
- defer stmt.Close()
-
- rows, err := stmt.Query(noteTextPattern)
- if err != nil {
- log.Panic(err)
- }
- defer rows.Close()
-
- rowsDoFetch(rows, Descr)
-}
-
-func rowsDoFetch(rows *sql.Rows, callerDescr string) {
- var nRows int
- var id int64
- var note string
-
- for rows.Next() {
- err := rows.Scan(&id, ¬e)
- if err != nil {
- log.Panic(err)
- }
- log.Printf("Row for %s (%d): id=%d, note=%q\n",
- callerDescr, nRows, id, note)
- nRows++
- }
- if err := rows.Err(); err != nil {
- log.Panic(err)
- }
- log.Printf("Total %d rows for %s.\n", nRows, callerDescr)
-}
diff --git a/vendor/src/github.com/mattn/go-sqlite3/backup.go b/vendor/src/github.com/mattn/go-sqlite3/backup.go
deleted file mode 100644
index 4c1e38c..0000000
--- a/vendor/src/github.com/mattn/go-sqlite3/backup.go
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright (C) 2014 Yasuhiro Matsumoto .
-//
-// Use of this source code is governed by an MIT-style
-// license that can be found in the LICENSE file.
-
-package sqlite3
-
-/*
-#ifndef USE_LIBSQLITE3
-#include
-#else
-#include
-#endif
-#include
-*/
-import "C"
-import (
- "runtime"
- "unsafe"
-)
-
-type SQLiteBackup struct {
- b *C.sqlite3_backup
-}
-
-func (c *SQLiteConn) Backup(dest string, conn *SQLiteConn, src string) (*SQLiteBackup, error) {
- destptr := C.CString(dest)
- defer C.free(unsafe.Pointer(destptr))
- srcptr := C.CString(src)
- defer C.free(unsafe.Pointer(srcptr))
-
- if b := C.sqlite3_backup_init(c.db, destptr, conn.db, srcptr); b != nil {
- bb := &SQLiteBackup{b: b}
- runtime.SetFinalizer(bb, (*SQLiteBackup).Finish)
- return bb, nil
- }
- return nil, c.lastError()
-}
-
-// Backs up for one step. Calls the underlying `sqlite3_backup_step` function.
-// This function returns a boolean indicating if the backup is done and
-// an error signalling any other error. Done is returned if the underlying C
-// function returns SQLITE_DONE (Code 101)
-func (b *SQLiteBackup) Step(p int) (bool, error) {
- ret := C.sqlite3_backup_step(b.b, C.int(p))
- if ret == C.SQLITE_DONE {
- return true, nil
- } else if ret != 0 && ret != C.SQLITE_LOCKED && ret != C.SQLITE_BUSY {
- return false, Error{Code: ErrNo(ret)}
- }
- return false, nil
-}
-
-func (b *SQLiteBackup) Remaining() int {
- return int(C.sqlite3_backup_remaining(b.b))
-}
-
-func (b *SQLiteBackup) PageCount() int {
- return int(C.sqlite3_backup_pagecount(b.b))
-}
-
-func (b *SQLiteBackup) Finish() error {
- return b.Close()
-}
-
-func (b *SQLiteBackup) Close() error {
- ret := C.sqlite3_backup_finish(b.b)
- if ret != 0 {
- return Error{Code: ErrNo(ret)}
- }
- b.b = nil
- runtime.SetFinalizer(b, nil)
- return nil
-}
diff --git a/vendor/src/github.com/mattn/go-sqlite3/backup_test.go b/vendor/src/github.com/mattn/go-sqlite3/backup_test.go
deleted file mode 100644
index 73c0a4b..0000000
--- a/vendor/src/github.com/mattn/go-sqlite3/backup_test.go
+++ /dev/null
@@ -1,290 +0,0 @@
-// Use of this source code is governed by an MIT-style
-// license that can be found in the LICENSE file.
-
-package sqlite3
-
-import (
- "database/sql"
- "fmt"
- "os"
- "testing"
- "time"
-)
-
-// The number of rows of test data to create in the source database.
-// Can be used to control how many pages are available to be backed up.
-const testRowCount = 100
-
-// The maximum number of seconds after which the page-by-page backup is considered to have taken too long.
-const usePagePerStepsTimeoutSeconds = 30
-
-// Test the backup functionality.
-func testBackup(t *testing.T, testRowCount int, usePerPageSteps bool) {
- // This function will be called multiple times.
- // It uses sql.Register(), which requires the name parameter value to be unique.
- // There does not currently appear to be a way to unregister a registered driver, however.
- // So generate a database driver name that will likely be unique.
- var driverName = fmt.Sprintf("sqlite3_testBackup_%v_%v_%v", testRowCount, usePerPageSteps, time.Now().UnixNano())
-
- // The driver's connection will be needed in order to perform the backup.
- driverConns := []*SQLiteConn{}
- sql.Register(driverName, &SQLiteDriver{
- ConnectHook: func(conn *SQLiteConn) error {
- driverConns = append(driverConns, conn)
- return nil
- },
- })
-
- // Connect to the source database.
- srcTempFilename := TempFilename(t)
- defer os.Remove(srcTempFilename)
- srcDb, err := sql.Open(driverName, srcTempFilename)
- if err != nil {
- t.Fatal("Failed to open the source database:", err)
- }
- defer srcDb.Close()
- err = srcDb.Ping()
- if err != nil {
- t.Fatal("Failed to connect to the source database:", err)
- }
-
- // Connect to the destination database.
- destTempFilename := TempFilename(t)
- defer os.Remove(destTempFilename)
- destDb, err := sql.Open(driverName, destTempFilename)
- if err != nil {
- t.Fatal("Failed to open the destination database:", err)
- }
- defer destDb.Close()
- err = destDb.Ping()
- if err != nil {
- t.Fatal("Failed to connect to the destination database:", err)
- }
-
- // Check the driver connections.
- if len(driverConns) != 2 {
- t.Fatalf("Expected 2 driver connections, but found %v.", len(driverConns))
- }
- srcDbDriverConn := driverConns[0]
- if srcDbDriverConn == nil {
- t.Fatal("The source database driver connection is nil.")
- }
- destDbDriverConn := driverConns[1]
- if destDbDriverConn == nil {
- t.Fatal("The destination database driver connection is nil.")
- }
-
- // Generate some test data for the given ID.
- var generateTestData = func(id int) string {
- return fmt.Sprintf("test-%v", id)
- }
-
- // Populate the source database with a test table containing some test data.
- tx, err := srcDb.Begin()
- if err != nil {
- t.Fatal("Failed to begin a transaction when populating the source database:", err)
- }
- _, err = srcDb.Exec("CREATE TABLE test (id INTEGER PRIMARY KEY, value TEXT)")
- if err != nil {
- tx.Rollback()
- t.Fatal("Failed to create the source database \"test\" table:", err)
- }
- for id := 0; id < testRowCount; id++ {
- _, err = srcDb.Exec("INSERT INTO test (id, value) VALUES (?, ?)", id, generateTestData(id))
- if err != nil {
- tx.Rollback()
- t.Fatal("Failed to insert a row into the source database \"test\" table:", err)
- }
- }
- err = tx.Commit()
- if err != nil {
- t.Fatal("Failed to populate the source database:", err)
- }
-
- // Confirm that the destination database is initially empty.
- var destTableCount int
- err = destDb.QueryRow("SELECT COUNT(*) FROM sqlite_master WHERE type = 'table'").Scan(&destTableCount)
- if err != nil {
- t.Fatal("Failed to check the destination table count:", err)
- }
- if destTableCount != 0 {
- t.Fatalf("The destination database is not empty; %v table(s) found.", destTableCount)
- }
-
- // Prepare to perform the backup.
- backup, err := destDbDriverConn.Backup("main", srcDbDriverConn, "main")
- if err != nil {
- t.Fatal("Failed to initialize the backup:", err)
- }
-
- // Allow the initial page count and remaining values to be retrieved.
- // According to , the page count and remaining values are "... only updated by sqlite3_backup_step()."
- isDone, err := backup.Step(0)
- if err != nil {
- t.Fatal("Unable to perform an initial 0-page backup step:", err)
- }
- if isDone {
- t.Fatal("Backup is unexpectedly done.")
- }
-
- // Check that the page count and remaining values are reasonable.
- initialPageCount := backup.PageCount()
- if initialPageCount <= 0 {
- t.Fatalf("Unexpected initial page count value: %v", initialPageCount)
- }
- initialRemaining := backup.Remaining()
- if initialRemaining <= 0 {
- t.Fatalf("Unexpected initial remaining value: %v", initialRemaining)
- }
- if initialRemaining != initialPageCount {
- t.Fatalf("Initial remaining value differs from the initial page count value; remaining: %v; page count: %v", initialRemaining, initialPageCount)
- }
-
- // Perform the backup.
- if usePerPageSteps {
- var startTime = time.Now().Unix()
-
- // Test backing-up using a page-by-page approach.
- var latestRemaining = initialRemaining
- for {
- // Perform the backup step.
- isDone, err = backup.Step(1)
- if err != nil {
- t.Fatal("Failed to perform a backup step:", err)
- }
-
- // The page count should remain unchanged from its initial value.
- currentPageCount := backup.PageCount()
- if currentPageCount != initialPageCount {
- t.Fatalf("Current page count differs from the initial page count; initial page count: %v; current page count: %v", initialPageCount, currentPageCount)
- }
-
- // There should now be one less page remaining.
- currentRemaining := backup.Remaining()
- expectedRemaining := latestRemaining - 1
- if currentRemaining != expectedRemaining {
- t.Fatalf("Unexpected remaining value; expected remaining value: %v; actual remaining value: %v", expectedRemaining, currentRemaining)
- }
- latestRemaining = currentRemaining
-
- if isDone {
- break
- }
-
- // Limit the runtime of the backup attempt.
- if (time.Now().Unix() - startTime) > usePagePerStepsTimeoutSeconds {
- t.Fatal("Backup is taking longer than expected.")
- }
- }
- } else {
- // Test the copying of all remaining pages.
- isDone, err = backup.Step(-1)
- if err != nil {
- t.Fatal("Failed to perform a backup step:", err)
- }
- if !isDone {
- t.Fatal("Backup is unexpectedly not done.")
- }
- }
-
- // Check that the page count and remaining values are reasonable.
- finalPageCount := backup.PageCount()
- if finalPageCount != initialPageCount {
- t.Fatalf("Final page count differs from the initial page count; initial page count: %v; final page count: %v", initialPageCount, finalPageCount)
- }
- finalRemaining := backup.Remaining()
- if finalRemaining != 0 {
- t.Fatalf("Unexpected remaining value: %v", finalRemaining)
- }
-
- // Finish the backup.
- err = backup.Finish()
- if err != nil {
- t.Fatal("Failed to finish backup:", err)
- }
-
- // Confirm that the "test" table now exists in the destination database.
- var doesTestTableExist bool
- err = destDb.QueryRow("SELECT EXISTS (SELECT 1 FROM sqlite_master WHERE type = 'table' AND name = 'test' LIMIT 1) AS test_table_exists").Scan(&doesTestTableExist)
- if err != nil {
- t.Fatal("Failed to check if the \"test\" table exists in the destination database:", err)
- }
- if !doesTestTableExist {
- t.Fatal("The \"test\" table could not be found in the destination database.")
- }
-
- // Confirm that the number of rows in the destination database's "test" table matches that of the source table.
- var actualTestTableRowCount int
- err = destDb.QueryRow("SELECT COUNT(*) FROM test").Scan(&actualTestTableRowCount)
- if err != nil {
- t.Fatal("Failed to determine the rowcount of the \"test\" table in the destination database:", err)
- }
- if testRowCount != actualTestTableRowCount {
- t.Fatalf("Unexpected destination \"test\" table row count; expected: %v; found: %v", testRowCount, actualTestTableRowCount)
- }
-
- // Check each of the rows in the destination database.
- for id := 0; id < testRowCount; id++ {
- var checkedValue string
- err = destDb.QueryRow("SELECT value FROM test WHERE id = ?", id).Scan(&checkedValue)
- if err != nil {
- t.Fatal("Failed to query the \"test\" table in the destination database:", err)
- }
-
- var expectedValue = generateTestData(id)
- if checkedValue != expectedValue {
- t.Fatalf("Unexpected value in the \"test\" table in the destination database; expected value: %v; actual value: %v", expectedValue, checkedValue)
- }
- }
-}
-
-func TestBackupStepByStep(t *testing.T) {
- testBackup(t, testRowCount, true)
-}
-
-func TestBackupAllRemainingPages(t *testing.T) {
- testBackup(t, testRowCount, false)
-}
-
-// Test the error reporting when preparing to perform a backup.
-func TestBackupError(t *testing.T) {
- const driverName = "sqlite3_TestBackupError"
-
- // The driver's connection will be needed in order to perform the backup.
- var dbDriverConn *SQLiteConn
- sql.Register(driverName, &SQLiteDriver{
- ConnectHook: func(conn *SQLiteConn) error {
- dbDriverConn = conn
- return nil
- },
- })
-
- // Connect to the database.
- dbTempFilename := TempFilename(t)
- defer os.Remove(dbTempFilename)
- db, err := sql.Open(driverName, dbTempFilename)
- if err != nil {
- t.Fatal("Failed to open the database:", err)
- }
- defer db.Close()
- db.Ping()
-
- // Need the driver connection in order to perform the backup.
- if dbDriverConn == nil {
- t.Fatal("Failed to get the driver connection.")
- }
-
- // Prepare to perform the backup.
- // Intentionally using the same connection for both the source and destination databases, to trigger an error result.
- backup, err := dbDriverConn.Backup("main", dbDriverConn, "main")
- if err == nil {
- t.Fatal("Failed to get the expected error result.")
- }
- const expectedError = "source and destination must be distinct"
- if err.Error() != expectedError {
- t.Fatalf("Unexpected error message; expected value: \"%v\"; actual value: \"%v\"", expectedError, err.Error())
- }
- if backup != nil {
- t.Fatal("Failed to get the expected nil backup result.")
- }
-}
diff --git a/vendor/src/github.com/mattn/go-sqlite3/callback.go b/vendor/src/github.com/mattn/go-sqlite3/callback.go
deleted file mode 100644
index 190b695..0000000
--- a/vendor/src/github.com/mattn/go-sqlite3/callback.go
+++ /dev/null
@@ -1,340 +0,0 @@
-// Copyright (C) 2014 Yasuhiro Matsumoto .
-//
-// Use of this source code is governed by an MIT-style
-// license that can be found in the LICENSE file.
-
-package sqlite3
-
-// You can't export a Go function to C and have definitions in the C
-// preamble in the same file, so we have to have callbackTrampoline in
-// its own file. Because we need a separate file anyway, the support
-// code for SQLite custom functions is in here.
-
-/*
-#ifndef USE_LIBSQLITE3
-#include
-#else
-#include
-#endif
-#include
-
-void _sqlite3_result_text(sqlite3_context* ctx, const char* s);
-void _sqlite3_result_blob(sqlite3_context* ctx, const void* b, int l);
-*/
-import "C"
-
-import (
- "errors"
- "fmt"
- "math"
- "reflect"
- "sync"
- "unsafe"
-)
-
-//export callbackTrampoline
-func callbackTrampoline(ctx *C.sqlite3_context, argc int, argv **C.sqlite3_value) {
- args := (*[(math.MaxInt32 - 1) / unsafe.Sizeof((*C.sqlite3_value)(nil))]*C.sqlite3_value)(unsafe.Pointer(argv))[:argc:argc]
- fi := lookupHandle(uintptr(C.sqlite3_user_data(ctx))).(*functionInfo)
- fi.Call(ctx, args)
-}
-
-//export stepTrampoline
-func stepTrampoline(ctx *C.sqlite3_context, argc int, argv **C.sqlite3_value) {
- args := (*[(math.MaxInt32 - 1) / unsafe.Sizeof((*C.sqlite3_value)(nil))]*C.sqlite3_value)(unsafe.Pointer(argv))[:argc:argc]
- ai := lookupHandle(uintptr(C.sqlite3_user_data(ctx))).(*aggInfo)
- ai.Step(ctx, args)
-}
-
-//export doneTrampoline
-func doneTrampoline(ctx *C.sqlite3_context) {
- handle := uintptr(C.sqlite3_user_data(ctx))
- ai := lookupHandle(handle).(*aggInfo)
- ai.Done(ctx)
-}
-
-// Use handles to avoid passing Go pointers to C.
-
-type handleVal struct {
- db *SQLiteConn
- val interface{}
-}
-
-var handleLock sync.Mutex
-var handleVals = make(map[uintptr]handleVal)
-var handleIndex uintptr = 100
-
-func newHandle(db *SQLiteConn, v interface{}) uintptr {
- handleLock.Lock()
- defer handleLock.Unlock()
- i := handleIndex
- handleIndex++
- handleVals[i] = handleVal{db, v}
- return i
-}
-
-func lookupHandle(handle uintptr) interface{} {
- handleLock.Lock()
- defer handleLock.Unlock()
- r, ok := handleVals[handle]
- if !ok {
- if handle >= 100 && handle < handleIndex {
- panic("deleted handle")
- } else {
- panic("invalid handle")
- }
- }
- return r.val
-}
-
-func deleteHandles(db *SQLiteConn) {
- handleLock.Lock()
- defer handleLock.Unlock()
- for handle, val := range handleVals {
- if val.db == db {
- delete(handleVals, handle)
- }
- }
-}
-
-// This is only here so that tests can refer to it.
-type callbackArgRaw C.sqlite3_value
-
-type callbackArgConverter func(*C.sqlite3_value) (reflect.Value, error)
-
-type callbackArgCast struct {
- f callbackArgConverter
- typ reflect.Type
-}
-
-func (c callbackArgCast) Run(v *C.sqlite3_value) (reflect.Value, error) {
- val, err := c.f(v)
- if err != nil {
- return reflect.Value{}, err
- }
- if !val.Type().ConvertibleTo(c.typ) {
- return reflect.Value{}, fmt.Errorf("cannot convert %s to %s", val.Type(), c.typ)
- }
- return val.Convert(c.typ), nil
-}
-
-func callbackArgInt64(v *C.sqlite3_value) (reflect.Value, error) {
- if C.sqlite3_value_type(v) != C.SQLITE_INTEGER {
- return reflect.Value{}, fmt.Errorf("argument must be an INTEGER")
- }
- return reflect.ValueOf(int64(C.sqlite3_value_int64(v))), nil
-}
-
-func callbackArgBool(v *C.sqlite3_value) (reflect.Value, error) {
- if C.sqlite3_value_type(v) != C.SQLITE_INTEGER {
- return reflect.Value{}, fmt.Errorf("argument must be an INTEGER")
- }
- i := int64(C.sqlite3_value_int64(v))
- val := false
- if i != 0 {
- val = true
- }
- return reflect.ValueOf(val), nil
-}
-
-func callbackArgFloat64(v *C.sqlite3_value) (reflect.Value, error) {
- if C.sqlite3_value_type(v) != C.SQLITE_FLOAT {
- return reflect.Value{}, fmt.Errorf("argument must be a FLOAT")
- }
- return reflect.ValueOf(float64(C.sqlite3_value_double(v))), nil
-}
-
-func callbackArgBytes(v *C.sqlite3_value) (reflect.Value, error) {
- switch C.sqlite3_value_type(v) {
- case C.SQLITE_BLOB:
- l := C.sqlite3_value_bytes(v)
- p := C.sqlite3_value_blob(v)
- return reflect.ValueOf(C.GoBytes(p, l)), nil
- case C.SQLITE_TEXT:
- l := C.sqlite3_value_bytes(v)
- c := unsafe.Pointer(C.sqlite3_value_text(v))
- return reflect.ValueOf(C.GoBytes(c, l)), nil
- default:
- return reflect.Value{}, fmt.Errorf("argument must be BLOB or TEXT")
- }
-}
-
-func callbackArgString(v *C.sqlite3_value) (reflect.Value, error) {
- switch C.sqlite3_value_type(v) {
- case C.SQLITE_BLOB:
- l := C.sqlite3_value_bytes(v)
- p := (*C.char)(C.sqlite3_value_blob(v))
- return reflect.ValueOf(C.GoStringN(p, l)), nil
- case C.SQLITE_TEXT:
- c := (*C.char)(unsafe.Pointer(C.sqlite3_value_text(v)))
- return reflect.ValueOf(C.GoString(c)), nil
- default:
- return reflect.Value{}, fmt.Errorf("argument must be BLOB or TEXT")
- }
-}
-
-func callbackArgGeneric(v *C.sqlite3_value) (reflect.Value, error) {
- switch C.sqlite3_value_type(v) {
- case C.SQLITE_INTEGER:
- return callbackArgInt64(v)
- case C.SQLITE_FLOAT:
- return callbackArgFloat64(v)
- case C.SQLITE_TEXT:
- return callbackArgString(v)
- case C.SQLITE_BLOB:
- return callbackArgBytes(v)
- case C.SQLITE_NULL:
- // Interpret NULL as a nil byte slice.
- var ret []byte
- return reflect.ValueOf(ret), nil
- default:
- panic("unreachable")
- }
-}
-
-func callbackArg(typ reflect.Type) (callbackArgConverter, error) {
- switch typ.Kind() {
- case reflect.Interface:
- if typ.NumMethod() != 0 {
- return nil, errors.New("the only supported interface type is interface{}")
- }
- return callbackArgGeneric, nil
- case reflect.Slice:
- if typ.Elem().Kind() != reflect.Uint8 {
- return nil, errors.New("the only supported slice type is []byte")
- }
- return callbackArgBytes, nil
- case reflect.String:
- return callbackArgString, nil
- case reflect.Bool:
- return callbackArgBool, nil
- case reflect.Int64:
- return callbackArgInt64, nil
- case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Int, reflect.Uint:
- c := callbackArgCast{callbackArgInt64, typ}
- return c.Run, nil
- case reflect.Float64:
- return callbackArgFloat64, nil
- case reflect.Float32:
- c := callbackArgCast{callbackArgFloat64, typ}
- return c.Run, nil
- default:
- return nil, fmt.Errorf("don't know how to convert to %s", typ)
- }
-}
-
-func callbackConvertArgs(argv []*C.sqlite3_value, converters []callbackArgConverter, variadic callbackArgConverter) ([]reflect.Value, error) {
- var args []reflect.Value
-
- if len(argv) < len(converters) {
- return nil, fmt.Errorf("function requires at least %d arguments", len(converters))
- }
-
- for i, arg := range argv[:len(converters)] {
- v, err := converters[i](arg)
- if err != nil {
- return nil, err
- }
- args = append(args, v)
- }
-
- if variadic != nil {
- for _, arg := range argv[len(converters):] {
- v, err := variadic(arg)
- if err != nil {
- return nil, err
- }
- args = append(args, v)
- }
- }
- return args, nil
-}
-
-type callbackRetConverter func(*C.sqlite3_context, reflect.Value) error
-
-func callbackRetInteger(ctx *C.sqlite3_context, v reflect.Value) error {
- switch v.Type().Kind() {
- case reflect.Int64:
- case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Int, reflect.Uint:
- v = v.Convert(reflect.TypeOf(int64(0)))
- case reflect.Bool:
- b := v.Interface().(bool)
- if b {
- v = reflect.ValueOf(int64(1))
- } else {
- v = reflect.ValueOf(int64(0))
- }
- default:
- return fmt.Errorf("cannot convert %s to INTEGER", v.Type())
- }
-
- C.sqlite3_result_int64(ctx, C.sqlite3_int64(v.Interface().(int64)))
- return nil
-}
-
-func callbackRetFloat(ctx *C.sqlite3_context, v reflect.Value) error {
- switch v.Type().Kind() {
- case reflect.Float64:
- case reflect.Float32:
- v = v.Convert(reflect.TypeOf(float64(0)))
- default:
- return fmt.Errorf("cannot convert %s to FLOAT", v.Type())
- }
-
- C.sqlite3_result_double(ctx, C.double(v.Interface().(float64)))
- return nil
-}
-
-func callbackRetBlob(ctx *C.sqlite3_context, v reflect.Value) error {
- if v.Type().Kind() != reflect.Slice || v.Type().Elem().Kind() != reflect.Uint8 {
- return fmt.Errorf("cannot convert %s to BLOB", v.Type())
- }
- i := v.Interface()
- if i == nil || len(i.([]byte)) == 0 {
- C.sqlite3_result_null(ctx)
- } else {
- bs := i.([]byte)
- C._sqlite3_result_blob(ctx, unsafe.Pointer(&bs[0]), C.int(len(bs)))
- }
- return nil
-}
-
-func callbackRetText(ctx *C.sqlite3_context, v reflect.Value) error {
- if v.Type().Kind() != reflect.String {
- return fmt.Errorf("cannot convert %s to TEXT", v.Type())
- }
- C._sqlite3_result_text(ctx, C.CString(v.Interface().(string)))
- return nil
-}
-
-func callbackRet(typ reflect.Type) (callbackRetConverter, error) {
- switch typ.Kind() {
- case reflect.Slice:
- if typ.Elem().Kind() != reflect.Uint8 {
- return nil, errors.New("the only supported slice type is []byte")
- }
- return callbackRetBlob, nil
- case reflect.String:
- return callbackRetText, nil
- case reflect.Bool, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Int, reflect.Uint:
- return callbackRetInteger, nil
- case reflect.Float32, reflect.Float64:
- return callbackRetFloat, nil
- default:
- return nil, fmt.Errorf("don't know how to convert to %s", typ)
- }
-}
-
-func callbackError(ctx *C.sqlite3_context, err error) {
- cstr := C.CString(err.Error())
- defer C.free(unsafe.Pointer(cstr))
- C.sqlite3_result_error(ctx, cstr, -1)
-}
-
-// Test support code. Tests are not allowed to import "C", so we can't
-// declare any functions that use C.sqlite3_value.
-func callbackSyntheticForTests(v reflect.Value, err error) callbackArgConverter {
- return func(*C.sqlite3_value) (reflect.Value, error) {
- return v, err
- }
-}
diff --git a/vendor/src/github.com/mattn/go-sqlite3/callback_test.go b/vendor/src/github.com/mattn/go-sqlite3/callback_test.go
deleted file mode 100644
index 5c61f44..0000000
--- a/vendor/src/github.com/mattn/go-sqlite3/callback_test.go
+++ /dev/null
@@ -1,97 +0,0 @@
-package sqlite3
-
-import (
- "errors"
- "math"
- "reflect"
- "testing"
-)
-
-func TestCallbackArgCast(t *testing.T) {
- intConv := callbackSyntheticForTests(reflect.ValueOf(int64(math.MaxInt64)), nil)
- floatConv := callbackSyntheticForTests(reflect.ValueOf(float64(math.MaxFloat64)), nil)
- errConv := callbackSyntheticForTests(reflect.Value{}, errors.New("test"))
-
- tests := []struct {
- f callbackArgConverter
- o reflect.Value
- }{
- {intConv, reflect.ValueOf(int8(-1))},
- {intConv, reflect.ValueOf(int16(-1))},
- {intConv, reflect.ValueOf(int32(-1))},
- {intConv, reflect.ValueOf(uint8(math.MaxUint8))},
- {intConv, reflect.ValueOf(uint16(math.MaxUint16))},
- {intConv, reflect.ValueOf(uint32(math.MaxUint32))},
- // Special case, int64->uint64 is only 1<<63 - 1, not 1<<64 - 1
- {intConv, reflect.ValueOf(uint64(math.MaxInt64))},
- {floatConv, reflect.ValueOf(float32(math.Inf(1)))},
- }
-
- for _, test := range tests {
- conv := callbackArgCast{test.f, test.o.Type()}
- val, err := conv.Run(nil)
- if err != nil {
- t.Errorf("Couldn't convert to %s: %s", test.o.Type(), err)
- } else if !reflect.DeepEqual(val.Interface(), test.o.Interface()) {
- t.Errorf("Unexpected result from converting to %s: got %v, want %v", test.o.Type(), val.Interface(), test.o.Interface())
- }
- }
-
- conv := callbackArgCast{errConv, reflect.TypeOf(int8(0))}
- _, err := conv.Run(nil)
- if err == nil {
- t.Errorf("Expected error during callbackArgCast, but got none")
- }
-}
-
-func TestCallbackConverters(t *testing.T) {
- tests := []struct {
- v interface{}
- err bool
- }{
- // Unfortunately, we can't tell which converter was returned,
- // but we can at least check which types can be converted.
- {[]byte{0}, false},
- {"text", false},
- {true, false},
- {int8(0), false},
- {int16(0), false},
- {int32(0), false},
- {int64(0), false},
- {uint8(0), false},
- {uint16(0), false},
- {uint32(0), false},
- {uint64(0), false},
- {int(0), false},
- {uint(0), false},
- {float64(0), false},
- {float32(0), false},
-
- {func() {}, true},
- {complex64(complex(0, 0)), true},
- {complex128(complex(0, 0)), true},
- {struct{}{}, true},
- {map[string]string{}, true},
- {[]string{}, true},
- {(*int8)(nil), true},
- {make(chan int), true},
- }
-
- for _, test := range tests {
- _, err := callbackArg(reflect.TypeOf(test.v))
- if test.err && err == nil {
- t.Errorf("Expected an error when converting %s, got no error", reflect.TypeOf(test.v))
- } else if !test.err && err != nil {
- t.Errorf("Expected converter when converting %s, got error: %s", reflect.TypeOf(test.v), err)
- }
- }
-
- for _, test := range tests {
- _, err := callbackRet(reflect.TypeOf(test.v))
- if test.err && err == nil {
- t.Errorf("Expected an error when converting %s, got no error", reflect.TypeOf(test.v))
- } else if !test.err && err != nil {
- t.Errorf("Expected converter when converting %s, got error: %s", reflect.TypeOf(test.v), err)
- }
- }
-}
diff --git a/vendor/src/github.com/mattn/go-sqlite3/doc.go b/vendor/src/github.com/mattn/go-sqlite3/doc.go
deleted file mode 100644
index 030cd93..0000000
--- a/vendor/src/github.com/mattn/go-sqlite3/doc.go
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
-Package sqlite3 provides interface to SQLite3 databases.
-
-This works as a driver for database/sql.
-
-Installation
-
- go get github.com/mattn/go-sqlite3
-
-Supported Types
-
-Currently, go-sqlite3 supports the following data types.
-
- +------------------------------+
- |go | sqlite3 |
- |----------|-------------------|
- |nil | null |
- |int | integer |
- |int64 | integer |
- |float64 | float |
- |bool | integer |
- |[]byte | blob |
- |string | text |
- |time.Time | timestamp/datetime|
- +------------------------------+
-
-SQLite3 Extension
-
-You can write your own extension module for sqlite3. For example, below is an
-extension for a Regexp matcher operation.
-
- #include
- #include
- #include
- #include
-
- SQLITE_EXTENSION_INIT1
- static void regexp_func(sqlite3_context *context, int argc, sqlite3_value **argv) {
- if (argc >= 2) {
- const char *target = (const char *)sqlite3_value_text(argv[1]);
- const char *pattern = (const char *)sqlite3_value_text(argv[0]);
- const char* errstr = NULL;
- int erroff = 0;
- int vec[500];
- int n, rc;
- pcre* re = pcre_compile(pattern, 0, &errstr, &erroff, NULL);
- rc = pcre_exec(re, NULL, target, strlen(target), 0, 0, vec, 500);
- if (rc <= 0) {
- sqlite3_result_error(context, errstr, 0);
- return;
- }
- sqlite3_result_int(context, 1);
- }
- }
-
- #ifdef _WIN32
- __declspec(dllexport)
- #endif
- int sqlite3_extension_init(sqlite3 *db, char **errmsg,
- const sqlite3_api_routines *api) {
- SQLITE_EXTENSION_INIT2(api);
- return sqlite3_create_function(db, "regexp", 2, SQLITE_UTF8,
- (void*)db, regexp_func, NULL, NULL);
- }
-
-It needs to be built as a so/dll shared library. And you need to register
-the extension module like below.
-
- sql.Register("sqlite3_with_extensions",
- &sqlite3.SQLiteDriver{
- Extensions: []string{
- "sqlite3_mod_regexp",
- },
- })
-
-Then, you can use this extension.
-
- rows, err := db.Query("select text from mytable where name regexp '^golang'")
-
-Connection Hook
-
-You can hook and inject your code when the connection is established. database/sql
-doesn't provide a way to get native go-sqlite3 interfaces. So if you want,
-you need to set ConnectHook and get the SQLiteConn.
-
- sql.Register("sqlite3_with_hook_example",
- &sqlite3.SQLiteDriver{
- ConnectHook: func(conn *sqlite3.SQLiteConn) error {
- sqlite3conn = append(sqlite3conn, conn)
- return nil
- },
- })
-
-Go SQlite3 Extensions
-
-If you want to register Go functions as SQLite extension functions,
-call RegisterFunction from ConnectHook.
-
- regex = func(re, s string) (bool, error) {
- return regexp.MatchString(re, s)
- }
- sql.Register("sqlite3_with_go_func",
- &sqlite3.SQLiteDriver{
- ConnectHook: func(conn *sqlite3.SQLiteConn) error {
- return conn.RegisterFunc("regexp", regex, true)
- },
- })
-
-See the documentation of RegisterFunc for more details.
-
-*/
-package sqlite3
-
-import "C"
diff --git a/vendor/src/github.com/mattn/go-sqlite3/error.go b/vendor/src/github.com/mattn/go-sqlite3/error.go
deleted file mode 100644
index b910108..0000000
--- a/vendor/src/github.com/mattn/go-sqlite3/error.go
+++ /dev/null
@@ -1,128 +0,0 @@
-// Copyright (C) 2014 Yasuhiro Matsumoto .
-//
-// Use of this source code is governed by an MIT-style
-// license that can be found in the LICENSE file.
-
-package sqlite3
-
-import "C"
-
-type ErrNo int
-
-const ErrNoMask C.int = 0xff
-
-type ErrNoExtended int
-
-type Error struct {
- Code ErrNo /* The error code returned by SQLite */
- ExtendedCode ErrNoExtended /* The extended error code returned by SQLite */
- err string /* The error string returned by sqlite3_errmsg(),
- this usually contains more specific details. */
-}
-
-// result codes from http://www.sqlite.org/c3ref/c_abort.html
-var (
- ErrError = ErrNo(1) /* SQL error or missing database */
- ErrInternal = ErrNo(2) /* Internal logic error in SQLite */
- ErrPerm = ErrNo(3) /* Access permission denied */
- ErrAbort = ErrNo(4) /* Callback routine requested an abort */
- ErrBusy = ErrNo(5) /* The database file is locked */
- ErrLocked = ErrNo(6) /* A table in the database is locked */
- ErrNomem = ErrNo(7) /* A malloc() failed */
- ErrReadonly = ErrNo(8) /* Attempt to write a readonly database */
- ErrInterrupt = ErrNo(9) /* Operation terminated by sqlite3_interrupt() */
- ErrIoErr = ErrNo(10) /* Some kind of disk I/O error occurred */
- ErrCorrupt = ErrNo(11) /* The database disk image is malformed */
- ErrNotFound = ErrNo(12) /* Unknown opcode in sqlite3_file_control() */
- ErrFull = ErrNo(13) /* Insertion failed because database is full */
- ErrCantOpen = ErrNo(14) /* Unable to open the database file */
- ErrProtocol = ErrNo(15) /* Database lock protocol error */
- ErrEmpty = ErrNo(16) /* Database is empty */
- ErrSchema = ErrNo(17) /* The database schema changed */
- ErrTooBig = ErrNo(18) /* String or BLOB exceeds size limit */
- ErrConstraint = ErrNo(19) /* Abort due to constraint violation */
- ErrMismatch = ErrNo(20) /* Data type mismatch */
- ErrMisuse = ErrNo(21) /* Library used incorrectly */
- ErrNoLFS = ErrNo(22) /* Uses OS features not supported on host */
- ErrAuth = ErrNo(23) /* Authorization denied */
- ErrFormat = ErrNo(24) /* Auxiliary database format error */
- ErrRange = ErrNo(25) /* 2nd parameter to sqlite3_bind out of range */
- ErrNotADB = ErrNo(26) /* File opened that is not a database file */
- ErrNotice = ErrNo(27) /* Notifications from sqlite3_log() */
- ErrWarning = ErrNo(28) /* Warnings from sqlite3_log() */
-)
-
-func (err ErrNo) Error() string {
- return Error{Code: err}.Error()
-}
-
-func (err ErrNo) Extend(by int) ErrNoExtended {
- return ErrNoExtended(int(err) | (by << 8))
-}
-
-func (err ErrNoExtended) Error() string {
- return Error{Code: ErrNo(C.int(err) & ErrNoMask), ExtendedCode: err}.Error()
-}
-
-func (err Error) Error() string {
- if err.err != "" {
- return err.err
- }
- return errorString(err)
-}
-
-// result codes from http://www.sqlite.org/c3ref/c_abort_rollback.html
-var (
- ErrIoErrRead = ErrIoErr.Extend(1)
- ErrIoErrShortRead = ErrIoErr.Extend(2)
- ErrIoErrWrite = ErrIoErr.Extend(3)
- ErrIoErrFsync = ErrIoErr.Extend(4)
- ErrIoErrDirFsync = ErrIoErr.Extend(5)
- ErrIoErrTruncate = ErrIoErr.Extend(6)
- ErrIoErrFstat = ErrIoErr.Extend(7)
- ErrIoErrUnlock = ErrIoErr.Extend(8)
- ErrIoErrRDlock = ErrIoErr.Extend(9)
- ErrIoErrDelete = ErrIoErr.Extend(10)
- ErrIoErrBlocked = ErrIoErr.Extend(11)
- ErrIoErrNoMem = ErrIoErr.Extend(12)
- ErrIoErrAccess = ErrIoErr.Extend(13)
- ErrIoErrCheckReservedLock = ErrIoErr.Extend(14)
- ErrIoErrLock = ErrIoErr.Extend(15)
- ErrIoErrClose = ErrIoErr.Extend(16)
- ErrIoErrDirClose = ErrIoErr.Extend(17)
- ErrIoErrSHMOpen = ErrIoErr.Extend(18)
- ErrIoErrSHMSize = ErrIoErr.Extend(19)
- ErrIoErrSHMLock = ErrIoErr.Extend(20)
- ErrIoErrSHMMap = ErrIoErr.Extend(21)
- ErrIoErrSeek = ErrIoErr.Extend(22)
- ErrIoErrDeleteNoent = ErrIoErr.Extend(23)
- ErrIoErrMMap = ErrIoErr.Extend(24)
- ErrIoErrGetTempPath = ErrIoErr.Extend(25)
- ErrIoErrConvPath = ErrIoErr.Extend(26)
- ErrLockedSharedCache = ErrLocked.Extend(1)
- ErrBusyRecovery = ErrBusy.Extend(1)
- ErrBusySnapshot = ErrBusy.Extend(2)
- ErrCantOpenNoTempDir = ErrCantOpen.Extend(1)
- ErrCantOpenIsDir = ErrCantOpen.Extend(2)
- ErrCantOpenFullPath = ErrCantOpen.Extend(3)
- ErrCantOpenConvPath = ErrCantOpen.Extend(4)
- ErrCorruptVTab = ErrCorrupt.Extend(1)
- ErrReadonlyRecovery = ErrReadonly.Extend(1)
- ErrReadonlyCantLock = ErrReadonly.Extend(2)
- ErrReadonlyRollback = ErrReadonly.Extend(3)
- ErrReadonlyDbMoved = ErrReadonly.Extend(4)
- ErrAbortRollback = ErrAbort.Extend(2)
- ErrConstraintCheck = ErrConstraint.Extend(1)
- ErrConstraintCommitHook = ErrConstraint.Extend(2)
- ErrConstraintForeignKey = ErrConstraint.Extend(3)
- ErrConstraintFunction = ErrConstraint.Extend(4)
- ErrConstraintNotNull = ErrConstraint.Extend(5)
- ErrConstraintPrimaryKey = ErrConstraint.Extend(6)
- ErrConstraintTrigger = ErrConstraint.Extend(7)
- ErrConstraintUnique = ErrConstraint.Extend(8)
- ErrConstraintVTab = ErrConstraint.Extend(9)
- ErrConstraintRowId = ErrConstraint.Extend(10)
- ErrNoticeRecoverWAL = ErrNotice.Extend(1)
- ErrNoticeRecoverRollback = ErrNotice.Extend(2)
- ErrWarningAutoIndex = ErrWarning.Extend(1)
-)
diff --git a/vendor/src/github.com/mattn/go-sqlite3/error_test.go b/vendor/src/github.com/mattn/go-sqlite3/error_test.go
deleted file mode 100644
index 1ccbe5b..0000000
--- a/vendor/src/github.com/mattn/go-sqlite3/error_test.go
+++ /dev/null
@@ -1,242 +0,0 @@
-// Copyright (C) 2014 Yasuhiro Matsumoto .
-//
-// Use of this source code is governed by an MIT-style
-// license that can be found in the LICENSE file.
-
-package sqlite3
-
-import (
- "database/sql"
- "io/ioutil"
- "os"
- "path"
- "testing"
-)
-
-func TestSimpleError(t *testing.T) {
- e := ErrError.Error()
- if e != "SQL logic error or missing database" {
- t.Error("wrong error code:" + e)
- }
-}
-
-func TestCorruptDbErrors(t *testing.T) {
- dirName, err := ioutil.TempDir("", "sqlite3")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(dirName)
-
- dbFileName := path.Join(dirName, "test.db")
- f, err := os.Create(dbFileName)
- if err != nil {
- t.Error(err)
- }
- f.Write([]byte{1, 2, 3, 4, 5})
- f.Close()
-
- db, err := sql.Open("sqlite3", dbFileName)
- if err == nil {
- _, err = db.Exec("drop table foo")
- }
-
- sqliteErr := err.(Error)
- if sqliteErr.Code != ErrNotADB {
- t.Error("wrong error code for corrupted DB")
- }
- if err.Error() == "" {
- t.Error("wrong error string for corrupted DB")
- }
- db.Close()
-}
-
-func TestSqlLogicErrors(t *testing.T) {
- dirName, err := ioutil.TempDir("", "sqlite3")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(dirName)
-
- dbFileName := path.Join(dirName, "test.db")
- db, err := sql.Open("sqlite3", dbFileName)
- if err != nil {
- t.Error(err)
- }
- defer db.Close()
-
- _, err = db.Exec("CREATE TABLE Foo (id INTEGER PRIMARY KEY)")
- if err != nil {
- t.Error(err)
- }
-
- const expectedErr = "table Foo already exists"
- _, err = db.Exec("CREATE TABLE Foo (id INTEGER PRIMARY KEY)")
- if err.Error() != expectedErr {
- t.Errorf("Unexpected error: %s, expected %s", err.Error(), expectedErr)
- }
-
-}
-
-func TestExtendedErrorCodes_ForeignKey(t *testing.T) {
- dirName, err := ioutil.TempDir("", "sqlite3-err")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(dirName)
-
- dbFileName := path.Join(dirName, "test.db")
- db, err := sql.Open("sqlite3", dbFileName)
- if err != nil {
- t.Error(err)
- }
- defer db.Close()
-
- _, err = db.Exec("PRAGMA foreign_keys=ON;")
- if err != nil {
- t.Errorf("PRAGMA foreign_keys=ON: %v", err)
- }
-
- _, err = db.Exec(`CREATE TABLE Foo (
- id INTEGER PRIMARY KEY AUTOINCREMENT,
- value INTEGER NOT NULL,
- ref INTEGER NULL REFERENCES Foo (id),
- UNIQUE(value)
- );`)
- if err != nil {
- t.Error(err)
- }
-
- _, err = db.Exec("INSERT INTO Foo (ref, value) VALUES (100, 100);")
- if err == nil {
- t.Error("No error!")
- } else {
- sqliteErr := err.(Error)
- if sqliteErr.Code != ErrConstraint {
- t.Errorf("Wrong basic error code: %d != %d",
- sqliteErr.Code, ErrConstraint)
- }
- if sqliteErr.ExtendedCode != ErrConstraintForeignKey {
- t.Errorf("Wrong extended error code: %d != %d",
- sqliteErr.ExtendedCode, ErrConstraintForeignKey)
- }
- }
-
-}
-
-func TestExtendedErrorCodes_NotNull(t *testing.T) {
- dirName, err := ioutil.TempDir("", "sqlite3-err")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(dirName)
-
- dbFileName := path.Join(dirName, "test.db")
- db, err := sql.Open("sqlite3", dbFileName)
- if err != nil {
- t.Error(err)
- }
- defer db.Close()
-
- _, err = db.Exec("PRAGMA foreign_keys=ON;")
- if err != nil {
- t.Errorf("PRAGMA foreign_keys=ON: %v", err)
- }
-
- _, err = db.Exec(`CREATE TABLE Foo (
- id INTEGER PRIMARY KEY AUTOINCREMENT,
- value INTEGER NOT NULL,
- ref INTEGER NULL REFERENCES Foo (id),
- UNIQUE(value)
- );`)
- if err != nil {
- t.Error(err)
- }
-
- res, err := db.Exec("INSERT INTO Foo (value) VALUES (100);")
- if err != nil {
- t.Fatalf("Creating first row: %v", err)
- }
-
- id, err := res.LastInsertId()
- if err != nil {
- t.Fatalf("Retrieving last insert id: %v", err)
- }
-
- _, err = db.Exec("INSERT INTO Foo (ref) VALUES (?);", id)
- if err == nil {
- t.Error("No error!")
- } else {
- sqliteErr := err.(Error)
- if sqliteErr.Code != ErrConstraint {
- t.Errorf("Wrong basic error code: %d != %d",
- sqliteErr.Code, ErrConstraint)
- }
- if sqliteErr.ExtendedCode != ErrConstraintNotNull {
- t.Errorf("Wrong extended error code: %d != %d",
- sqliteErr.ExtendedCode, ErrConstraintNotNull)
- }
- }
-
-}
-
-func TestExtendedErrorCodes_Unique(t *testing.T) {
- dirName, err := ioutil.TempDir("", "sqlite3-err")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(dirName)
-
- dbFileName := path.Join(dirName, "test.db")
- db, err := sql.Open("sqlite3", dbFileName)
- if err != nil {
- t.Error(err)
- }
- defer db.Close()
-
- _, err = db.Exec("PRAGMA foreign_keys=ON;")
- if err != nil {
- t.Errorf("PRAGMA foreign_keys=ON: %v", err)
- }
-
- _, err = db.Exec(`CREATE TABLE Foo (
- id INTEGER PRIMARY KEY AUTOINCREMENT,
- value INTEGER NOT NULL,
- ref INTEGER NULL REFERENCES Foo (id),
- UNIQUE(value)
- );`)
- if err != nil {
- t.Error(err)
- }
-
- res, err := db.Exec("INSERT INTO Foo (value) VALUES (100);")
- if err != nil {
- t.Fatalf("Creating first row: %v", err)
- }
-
- id, err := res.LastInsertId()
- if err != nil {
- t.Fatalf("Retrieving last insert id: %v", err)
- }
-
- _, err = db.Exec("INSERT INTO Foo (ref, value) VALUES (?, 100);", id)
- if err == nil {
- t.Error("No error!")
- } else {
- sqliteErr := err.(Error)
- if sqliteErr.Code != ErrConstraint {
- t.Errorf("Wrong basic error code: %d != %d",
- sqliteErr.Code, ErrConstraint)
- }
- if sqliteErr.ExtendedCode != ErrConstraintUnique {
- t.Errorf("Wrong extended error code: %d != %d",
- sqliteErr.ExtendedCode, ErrConstraintUnique)
- }
- extended := sqliteErr.Code.Extend(3).Error()
- expected := "constraint failed"
- if extended != expected {
- t.Errorf("Wrong basic error code: %q != %q",
- extended, expected)
- }
- }
-
-}
diff --git a/vendor/src/github.com/mattn/go-sqlite3/sqlite3-binding.c b/vendor/src/github.com/mattn/go-sqlite3/sqlite3-binding.c
deleted file mode 100644
index 1f085b0..0000000
--- a/vendor/src/github.com/mattn/go-sqlite3/sqlite3-binding.c
+++ /dev/null
@@ -1,197850 +0,0 @@
-/******************************************************************************
-** This file is an amalgamation of many separate C source files from SQLite
-** version 3.14.0. By combining all the individual C code files into this
-** single large file, the entire code can be compiled as a single translation
-** unit. This allows many compilers to do optimizations that would not be
-** possible if the files were compiled separately. Performance improvements
-** of 5% or more are commonly seen when SQLite is compiled as a single
-** translation unit.
-**
-** This file is all you need to compile SQLite. To use SQLite in other
-** programs, you need this file and the "sqlite3.h" header file that defines
-** the programming interface to the SQLite library. (If you do not have
-** the "sqlite3.h" header file at hand, you will find a copy embedded within
-** the text of this file. Search for "Begin file sqlite3.h" to find the start
-** of the embedded sqlite3.h header file.) Additional code files may be needed
-** if you want a wrapper to interface SQLite with your choice of programming
-** language. The code for the "sqlite3" command-line shell is also in a
-** separate file. This file contains only code for the core SQLite library.
-*/
-#define SQLITE_CORE 1
-#define SQLITE_AMALGAMATION 1
-#ifndef SQLITE_PRIVATE
-# define SQLITE_PRIVATE static
-#endif
-/************** Begin file sqliteInt.h ***************************************/
-/*
-** 2001 September 15
-**
-** The author disclaims copyright to this source code. In place of
-** a legal notice, here is a blessing:
-**
-** May you do good and not evil.
-** May you find forgiveness for yourself and forgive others.
-** May you share freely, never taking more than you give.
-**
-*************************************************************************
-** Internal interface definitions for SQLite.
-**
-*/
-#ifndef SQLITEINT_H
-#define SQLITEINT_H
-
-/* Special Comments:
-**
-** Some comments have special meaning to the tools that measure test
-** coverage:
-**
-** NO_TEST - The branches on this line are not
-** measured by branch coverage. This is
-** used on lines of code that actually
-** implement parts of coverage testing.
-**
-** OPTIMIZATION-IF-TRUE - This branch is allowed to alway be false
-** and the correct answer is still obtained,
-** though perhaps more slowly.
-**
-** OPTIMIZATION-IF-FALSE - This branch is allowed to alway be true
-** and the correct answer is still obtained,
-** though perhaps more slowly.
-**
-** PREVENTS-HARMLESS-OVERREAD - This branch prevents a buffer overread
-** that would be harmless and undetectable
-** if it did occur.
-**
-** In all cases, the special comment must be enclosed in the usual
-** slash-asterisk...asterisk-slash comment marks, with no spaces between the
-** asterisks and the comment text.
-*/
-
-/*
-** Make sure the Tcl calling convention macro is defined. This macro is
-** only used by test code and Tcl integration code.
-*/
-#ifndef SQLITE_TCLAPI
-# define SQLITE_TCLAPI
-#endif
-
-/*
-** Make sure that rand_s() is available on Windows systems with MSVC 2005
-** or higher.
-*/
-#if defined(_MSC_VER) && _MSC_VER>=1400
-# define _CRT_RAND_S
-#endif
-
-/*
-** Include the header file used to customize the compiler options for MSVC.
-** This should be done first so that it can successfully prevent spurious
-** compiler warnings due to subsequent content in this file and other files
-** that are included by this file.
-*/
-/************** Include msvc.h in the middle of sqliteInt.h ******************/
-/************** Begin file msvc.h ********************************************/
-/*
-** 2015 January 12
-**
-** The author disclaims copyright to this source code. In place of
-** a legal notice, here is a blessing:
-**
-** May you do good and not evil.
-** May you find forgiveness for yourself and forgive others.
-** May you share freely, never taking more than you give.
-**
-******************************************************************************
-**
-** This file contains code that is specific to MSVC.
-*/
-#ifndef SQLITE_MSVC_H
-#define SQLITE_MSVC_H
-
-#if defined(_MSC_VER)
-#pragma warning(disable : 4054)
-#pragma warning(disable : 4055)
-#pragma warning(disable : 4100)
-#pragma warning(disable : 4127)
-#pragma warning(disable : 4130)
-#pragma warning(disable : 4152)
-#pragma warning(disable : 4189)
-#pragma warning(disable : 4206)
-#pragma warning(disable : 4210)
-#pragma warning(disable : 4232)
-#pragma warning(disable : 4244)
-#pragma warning(disable : 4305)
-#pragma warning(disable : 4306)
-#pragma warning(disable : 4702)
-#pragma warning(disable : 4706)
-#endif /* defined(_MSC_VER) */
-
-#endif /* SQLITE_MSVC_H */
-
-/************** End of msvc.h ************************************************/
-/************** Continuing where we left off in sqliteInt.h ******************/
-
-/*
-** Special setup for VxWorks
-*/
-/************** Include vxworks.h in the middle of sqliteInt.h ***************/
-/************** Begin file vxworks.h *****************************************/
-/*
-** 2015-03-02
-**
-** The author disclaims copyright to this source code. In place of
-** a legal notice, here is a blessing:
-**
-** May you do good and not evil.
-** May you find forgiveness for yourself and forgive others.
-** May you share freely, never taking more than you give.
-**
-******************************************************************************
-**
-** This file contains code that is specific to Wind River's VxWorks
-*/
-#if defined(__RTP__) || defined(_WRS_KERNEL)
-/* This is VxWorks. Set up things specially for that OS
-*/
-#include
-#include /* amalgamator: dontcache */
-#define OS_VXWORKS 1
-#define SQLITE_OS_OTHER 0
-#define SQLITE_HOMEGROWN_RECURSIVE_MUTEX 1
-#define SQLITE_OMIT_LOAD_EXTENSION 1
-#define SQLITE_ENABLE_LOCKING_STYLE 0
-#define HAVE_UTIME 1
-#else
-/* This is not VxWorks. */
-#define OS_VXWORKS 0
-#define HAVE_FCHOWN 1
-#define HAVE_READLINK 1
-#define HAVE_LSTAT 1
-#endif /* defined(_WRS_KERNEL) */
-
-/************** End of vxworks.h *********************************************/
-/************** Continuing where we left off in sqliteInt.h ******************/
-
-/*
-** These #defines should enable >2GB file support on POSIX if the
-** underlying operating system supports it. If the OS lacks
-** large file support, or if the OS is windows, these should be no-ops.
-**
-** Ticket #2739: The _LARGEFILE_SOURCE macro must appear before any
-** system #includes. Hence, this block of code must be the very first
-** code in all source files.
-**
-** Large file support can be disabled using the -DSQLITE_DISABLE_LFS switch
-** on the compiler command line. This is necessary if you are compiling
-** on a recent machine (ex: Red Hat 7.2) but you want your code to work
-** on an older machine (ex: Red Hat 6.0). If you compile on Red Hat 7.2
-** without this option, LFS is enable. But LFS does not exist in the kernel
-** in Red Hat 6.0, so the code won't work. Hence, for maximum binary
-** portability you should omit LFS.
-**
-** The previous paragraph was written in 2005. (This paragraph is written
-** on 2008-11-28.) These days, all Linux kernels support large files, so
-** you should probably leave LFS enabled. But some embedded platforms might
-** lack LFS in which case the SQLITE_DISABLE_LFS macro might still be useful.
-**
-** Similar is true for Mac OS X. LFS is only supported on Mac OS X 9 and later.
-*/
-#ifndef SQLITE_DISABLE_LFS
-# define _LARGE_FILE 1
-# ifndef _FILE_OFFSET_BITS
-# define _FILE_OFFSET_BITS 64
-# endif
-# define _LARGEFILE_SOURCE 1
-#endif
-
-/* What version of GCC is being used. 0 means GCC is not being used */
-#ifdef __GNUC__
-# define GCC_VERSION (__GNUC__*1000000+__GNUC_MINOR__*1000+__GNUC_PATCHLEVEL__)
-#else
-# define GCC_VERSION 0
-#endif
-
-/* Needed for various definitions... */
-#if defined(__GNUC__) && !defined(_GNU_SOURCE)
-# define _GNU_SOURCE
-#endif
-
-#if defined(__OpenBSD__) && !defined(_BSD_SOURCE)
-# define _BSD_SOURCE
-#endif
-
-/*
-** For MinGW, check to see if we can include the header file containing its
-** version information, among other things. Normally, this internal MinGW
-** header file would [only] be included automatically by other MinGW header
-** files; however, the contained version information is now required by this
-** header file to work around binary compatibility issues (see below) and
-** this is the only known way to reliably obtain it. This entire #if block
-** would be completely unnecessary if there was any other way of detecting
-** MinGW via their preprocessor (e.g. if they customized their GCC to define
-** some MinGW-specific macros). When compiling for MinGW, either the
-** _HAVE_MINGW_H or _HAVE__MINGW_H (note the extra underscore) macro must be
-** defined; otherwise, detection of conditions specific to MinGW will be
-** disabled.
-*/
-#if defined(_HAVE_MINGW_H)
-# include "mingw.h"
-#elif defined(_HAVE__MINGW_H)
-# include "_mingw.h"
-#endif
-
-/*
-** For MinGW version 4.x (and higher), check to see if the _USE_32BIT_TIME_T
-** define is required to maintain binary compatibility with the MSVC runtime
-** library in use (e.g. for Windows XP).
-*/
-#if !defined(_USE_32BIT_TIME_T) && !defined(_USE_64BIT_TIME_T) && \
- defined(_WIN32) && !defined(_WIN64) && \
- defined(__MINGW_MAJOR_VERSION) && __MINGW_MAJOR_VERSION >= 4 && \
- defined(__MSVCRT__)
-# define _USE_32BIT_TIME_T
-#endif
-
-/* The public SQLite interface. The _FILE_OFFSET_BITS macro must appear
-** first in QNX. Also, the _USE_32BIT_TIME_T macro must appear first for
-** MinGW.
-*/
-/************** Include sqlite3.h in the middle of sqliteInt.h ***************/
-/************** Begin file sqlite3.h *****************************************/
-/*
-** 2001 September 15
-**
-** The author disclaims copyright to this source code. In place of
-** a legal notice, here is a blessing:
-**
-** May you do good and not evil.
-** May you find forgiveness for yourself and forgive others.
-** May you share freely, never taking more than you give.
-**
-*************************************************************************
-** This header file defines the interface that the SQLite library
-** presents to client programs. If a C-function, structure, datatype,
-** or constant definition does not appear in this file, then it is
-** not a published API of SQLite, is subject to change without
-** notice, and should not be referenced by programs that use SQLite.
-**
-** Some of the definitions that are in this file are marked as
-** "experimental". Experimental interfaces are normally new
-** features recently added to SQLite. We do not anticipate changes
-** to experimental interfaces but reserve the right to make minor changes
-** if experience from use "in the wild" suggest such changes are prudent.
-**
-** The official C-language API documentation for SQLite is derived
-** from comments in this file. This file is the authoritative source
-** on how SQLite interfaces are supposed to operate.
-**
-** The name of this file under configuration management is "sqlite.h.in".
-** The makefile makes some minor changes to this file (such as inserting
-** the version number) and changes its name to "sqlite3.h" as
-** part of the build process.
-*/
-#ifndef SQLITE3_H
-#define SQLITE3_H
-#include /* Needed for the definition of va_list */
-
-/*
-** Make sure we can call this stuff from C++.
-*/
-#if 0
-extern "C" {
-#endif
-
-
-/*
-** Provide the ability to override linkage features of the interface.
-*/
-#ifndef SQLITE_EXTERN
-# define SQLITE_EXTERN extern
-#endif
-#ifndef SQLITE_API
-# define SQLITE_API
-#endif
-#ifndef SQLITE_CDECL
-# define SQLITE_CDECL
-#endif
-#ifndef SQLITE_APICALL
-# define SQLITE_APICALL
-#endif
-#ifndef SQLITE_STDCALL
-# define SQLITE_STDCALL SQLITE_APICALL
-#endif
-#ifndef SQLITE_CALLBACK
-# define SQLITE_CALLBACK
-#endif
-#ifndef SQLITE_SYSAPI
-# define SQLITE_SYSAPI
-#endif
-
-/*
-** These no-op macros are used in front of interfaces to mark those
-** interfaces as either deprecated or experimental. New applications
-** should not use deprecated interfaces - they are supported for backwards
-** compatibility only. Application writers should be aware that
-** experimental interfaces are subject to change in point releases.
-**
-** These macros used to resolve to various kinds of compiler magic that
-** would generate warning messages when they were used. But that
-** compiler magic ended up generating such a flurry of bug reports
-** that we have taken it all out and gone back to using simple
-** noop macros.
-*/
-#define SQLITE_DEPRECATED
-#define SQLITE_EXPERIMENTAL
-
-/*
-** Ensure these symbols were not defined by some previous header file.
-*/
-#ifdef SQLITE_VERSION
-# undef SQLITE_VERSION
-#endif
-#ifdef SQLITE_VERSION_NUMBER
-# undef SQLITE_VERSION_NUMBER
-#endif
-
-/*
-** CAPI3REF: Compile-Time Library Version Numbers
-**
-** ^(The [SQLITE_VERSION] C preprocessor macro in the sqlite3.h header
-** evaluates to a string literal that is the SQLite version in the
-** format "X.Y.Z" where X is the major version number (always 3 for
-** SQLite3) and Y is the minor version number and Z is the release number.)^
-** ^(The [SQLITE_VERSION_NUMBER] C preprocessor macro resolves to an integer
-** with the value (X*1000000 + Y*1000 + Z) where X, Y, and Z are the same
-** numbers used in [SQLITE_VERSION].)^
-** The SQLITE_VERSION_NUMBER for any given release of SQLite will also
-** be larger than the release from which it is derived. Either Y will
-** be held constant and Z will be incremented or else Y will be incremented
-** and Z will be reset to zero.
-**
-** Since version 3.6.18, SQLite source code has been stored in the
-** Fossil configuration management
-** system . ^The SQLITE_SOURCE_ID macro evaluates to
-** a string which identifies a particular check-in of SQLite
-** within its configuration management system. ^The SQLITE_SOURCE_ID
-** string contains the date and time of the check-in (UTC) and an SHA1
-** hash of the entire source tree.
-**
-** See also: [sqlite3_libversion()],
-** [sqlite3_libversion_number()], [sqlite3_sourceid()],
-** [sqlite_version()] and [sqlite_source_id()].
-*/
-#define SQLITE_VERSION "3.14.0"
-#define SQLITE_VERSION_NUMBER 3014000
-#define SQLITE_SOURCE_ID "2016-08-08 13:40:27 d5e98057028abcf7217d0d2b2e29bbbcdf09d6de"
-
-/*
-** CAPI3REF: Run-Time Library Version Numbers
-** KEYWORDS: sqlite3_version, sqlite3_sourceid
-**
-** These interfaces provide the same information as the [SQLITE_VERSION],
-** [SQLITE_VERSION_NUMBER], and [SQLITE_SOURCE_ID] C preprocessor macros
-** but are associated with the library instead of the header file. ^(Cautious
-** programmers might include assert() statements in their application to
-** verify that values returned by these interfaces match the macros in
-** the header, and thus ensure that the application is
-** compiled with matching library and header files.
-**
-**
-** assert( sqlite3_libversion_number()==SQLITE_VERSION_NUMBER );
-** assert( strcmp(sqlite3_sourceid(),SQLITE_SOURCE_ID)==0 );
-** assert( strcmp(sqlite3_libversion(),SQLITE_VERSION)==0 );
-** )^
-**
-** ^The sqlite3_version[] string constant contains the text of [SQLITE_VERSION]
-** macro. ^The sqlite3_libversion() function returns a pointer to the
-** to the sqlite3_version[] string constant. The sqlite3_libversion()
-** function is provided for use in DLLs since DLL users usually do not have
-** direct access to string constants within the DLL. ^The
-** sqlite3_libversion_number() function returns an integer equal to
-** [SQLITE_VERSION_NUMBER]. ^The sqlite3_sourceid() function returns
-** a pointer to a string constant whose value is the same as the
-** [SQLITE_SOURCE_ID] C preprocessor macro.
-**
-** See also: [sqlite_version()] and [sqlite_source_id()].
-*/
-SQLITE_API const char sqlite3_version[] = SQLITE_VERSION;
-SQLITE_API const char *SQLITE_STDCALL sqlite3_libversion(void);
-SQLITE_API const char *SQLITE_STDCALL sqlite3_sourceid(void);
-SQLITE_API int SQLITE_STDCALL sqlite3_libversion_number(void);
-
-/*
-** CAPI3REF: Run-Time Library Compilation Options Diagnostics
-**
-** ^The sqlite3_compileoption_used() function returns 0 or 1
-** indicating whether the specified option was defined at
-** compile time. ^The SQLITE_ prefix may be omitted from the
-** option name passed to sqlite3_compileoption_used().
-**
-** ^The sqlite3_compileoption_get() function allows iterating
-** over the list of options that were defined at compile time by
-** returning the N-th compile time option string. ^If N is out of range,
-** sqlite3_compileoption_get() returns a NULL pointer. ^The SQLITE_
-** prefix is omitted from any strings returned by
-** sqlite3_compileoption_get().
-**
-** ^Support for the diagnostic functions sqlite3_compileoption_used()
-** and sqlite3_compileoption_get() may be omitted by specifying the
-** [SQLITE_OMIT_COMPILEOPTION_DIAGS] option at compile time.
-**
-** See also: SQL functions [sqlite_compileoption_used()] and
-** [sqlite_compileoption_get()] and the [compile_options pragma].
-*/
-#ifndef SQLITE_OMIT_COMPILEOPTION_DIAGS
-SQLITE_API int SQLITE_STDCALL sqlite3_compileoption_used(const char *zOptName);
-SQLITE_API const char *SQLITE_STDCALL sqlite3_compileoption_get(int N);
-#endif
-
-/*
-** CAPI3REF: Test To See If The Library Is Threadsafe
-**
-** ^The sqlite3_threadsafe() function returns zero if and only if
-** SQLite was compiled with mutexing code omitted due to the
-** [SQLITE_THREADSAFE] compile-time option being set to 0.
-**
-** SQLite can be compiled with or without mutexes. When
-** the [SQLITE_THREADSAFE] C preprocessor macro is 1 or 2, mutexes
-** are enabled and SQLite is threadsafe. When the
-** [SQLITE_THREADSAFE] macro is 0,
-** the mutexes are omitted. Without the mutexes, it is not safe
-** to use SQLite concurrently from more than one thread.
-**
-** Enabling mutexes incurs a measurable performance penalty.
-** So if speed is of utmost importance, it makes sense to disable
-** the mutexes. But for maximum safety, mutexes should be enabled.
-** ^The default behavior is for mutexes to be enabled.
-**
-** This interface can be used by an application to make sure that the
-** version of SQLite that it is linking against was compiled with
-** the desired setting of the [SQLITE_THREADSAFE] macro.
-**
-** This interface only reports on the compile-time mutex setting
-** of the [SQLITE_THREADSAFE] flag. If SQLite is compiled with
-** SQLITE_THREADSAFE=1 or =2 then mutexes are enabled by default but
-** can be fully or partially disabled using a call to [sqlite3_config()]
-** with the verbs [SQLITE_CONFIG_SINGLETHREAD], [SQLITE_CONFIG_MULTITHREAD],
-** or [SQLITE_CONFIG_SERIALIZED]. ^(The return value of the
-** sqlite3_threadsafe() function shows only the compile-time setting of
-** thread safety, not any run-time changes to that setting made by
-** sqlite3_config(). In other words, the return value from sqlite3_threadsafe()
-** is unchanged by calls to sqlite3_config().)^
-**
-** See the [threading mode] documentation for additional information.
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_threadsafe(void);
-
-/*
-** CAPI3REF: Database Connection Handle
-** KEYWORDS: {database connection} {database connections}
-**
-** Each open SQLite database is represented by a pointer to an instance of
-** the opaque structure named "sqlite3". It is useful to think of an sqlite3
-** pointer as an object. The [sqlite3_open()], [sqlite3_open16()], and
-** [sqlite3_open_v2()] interfaces are its constructors, and [sqlite3_close()]
-** and [sqlite3_close_v2()] are its destructors. There are many other
-** interfaces (such as
-** [sqlite3_prepare_v2()], [sqlite3_create_function()], and
-** [sqlite3_busy_timeout()] to name but three) that are methods on an
-** sqlite3 object.
-*/
-typedef struct sqlite3 sqlite3;
-
-/*
-** CAPI3REF: 64-Bit Integer Types
-** KEYWORDS: sqlite_int64 sqlite_uint64
-**
-** Because there is no cross-platform way to specify 64-bit integer types
-** SQLite includes typedefs for 64-bit signed and unsigned integers.
-**
-** The sqlite3_int64 and sqlite3_uint64 are the preferred type definitions.
-** The sqlite_int64 and sqlite_uint64 types are supported for backwards
-** compatibility only.
-**
-** ^The sqlite3_int64 and sqlite_int64 types can store integer values
-** between -9223372036854775808 and +9223372036854775807 inclusive. ^The
-** sqlite3_uint64 and sqlite_uint64 types can store integer values
-** between 0 and +18446744073709551615 inclusive.
-*/
-#ifdef SQLITE_INT64_TYPE
- typedef SQLITE_INT64_TYPE sqlite_int64;
- typedef unsigned SQLITE_INT64_TYPE sqlite_uint64;
-#elif defined(_MSC_VER) || defined(__BORLANDC__)
- typedef __int64 sqlite_int64;
- typedef unsigned __int64 sqlite_uint64;
-#else
- typedef long long int sqlite_int64;
- typedef unsigned long long int sqlite_uint64;
-#endif
-typedef sqlite_int64 sqlite3_int64;
-typedef sqlite_uint64 sqlite3_uint64;
-
-/*
-** If compiling for a processor that lacks floating point support,
-** substitute integer for floating-point.
-*/
-#ifdef SQLITE_OMIT_FLOATING_POINT
-# define double sqlite3_int64
-#endif
-
-/*
-** CAPI3REF: Closing A Database Connection
-** DESTRUCTOR: sqlite3
-**
-** ^The sqlite3_close() and sqlite3_close_v2() routines are destructors
-** for the [sqlite3] object.
-** ^Calls to sqlite3_close() and sqlite3_close_v2() return [SQLITE_OK] if
-** the [sqlite3] object is successfully destroyed and all associated
-** resources are deallocated.
-**
-** ^If the database connection is associated with unfinalized prepared
-** statements or unfinished sqlite3_backup objects then sqlite3_close()
-** will leave the database connection open and return [SQLITE_BUSY].
-** ^If sqlite3_close_v2() is called with unfinalized prepared statements
-** and/or unfinished sqlite3_backups, then the database connection becomes
-** an unusable "zombie" which will automatically be deallocated when the
-** last prepared statement is finalized or the last sqlite3_backup is
-** finished. The sqlite3_close_v2() interface is intended for use with
-** host languages that are garbage collected, and where the order in which
-** destructors are called is arbitrary.
-**
-** Applications should [sqlite3_finalize | finalize] all [prepared statements],
-** [sqlite3_blob_close | close] all [BLOB handles], and
-** [sqlite3_backup_finish | finish] all [sqlite3_backup] objects associated
-** with the [sqlite3] object prior to attempting to close the object. ^If
-** sqlite3_close_v2() is called on a [database connection] that still has
-** outstanding [prepared statements], [BLOB handles], and/or
-** [sqlite3_backup] objects then it returns [SQLITE_OK] and the deallocation
-** of resources is deferred until all [prepared statements], [BLOB handles],
-** and [sqlite3_backup] objects are also destroyed.
-**
-** ^If an [sqlite3] object is destroyed while a transaction is open,
-** the transaction is automatically rolled back.
-**
-** The C parameter to [sqlite3_close(C)] and [sqlite3_close_v2(C)]
-** must be either a NULL
-** pointer or an [sqlite3] object pointer obtained
-** from [sqlite3_open()], [sqlite3_open16()], or
-** [sqlite3_open_v2()], and not previously closed.
-** ^Calling sqlite3_close() or sqlite3_close_v2() with a NULL pointer
-** argument is a harmless no-op.
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_close(sqlite3*);
-SQLITE_API int SQLITE_STDCALL sqlite3_close_v2(sqlite3*);
-
-/*
-** The type for a callback function.
-** This is legacy and deprecated. It is included for historical
-** compatibility and is not documented.
-*/
-typedef int (*sqlite3_callback)(void*,int,char**, char**);
-
-/*
-** CAPI3REF: One-Step Query Execution Interface
-** METHOD: sqlite3
-**
-** The sqlite3_exec() interface is a convenience wrapper around
-** [sqlite3_prepare_v2()], [sqlite3_step()], and [sqlite3_finalize()],
-** that allows an application to run multiple statements of SQL
-** without having to use a lot of C code.
-**
-** ^The sqlite3_exec() interface runs zero or more UTF-8 encoded,
-** semicolon-separate SQL statements passed into its 2nd argument,
-** in the context of the [database connection] passed in as its 1st
-** argument. ^If the callback function of the 3rd argument to
-** sqlite3_exec() is not NULL, then it is invoked for each result row
-** coming out of the evaluated SQL statements. ^The 4th argument to
-** sqlite3_exec() is relayed through to the 1st argument of each
-** callback invocation. ^If the callback pointer to sqlite3_exec()
-** is NULL, then no callback is ever invoked and result rows are
-** ignored.
-**
-** ^If an error occurs while evaluating the SQL statements passed into
-** sqlite3_exec(), then execution of the current statement stops and
-** subsequent statements are skipped. ^If the 5th parameter to sqlite3_exec()
-** is not NULL then any error message is written into memory obtained
-** from [sqlite3_malloc()] and passed back through the 5th parameter.
-** To avoid memory leaks, the application should invoke [sqlite3_free()]
-** on error message strings returned through the 5th parameter of
-** sqlite3_exec() after the error message string is no longer needed.
-** ^If the 5th parameter to sqlite3_exec() is not NULL and no errors
-** occur, then sqlite3_exec() sets the pointer in its 5th parameter to
-** NULL before returning.
-**
-** ^If an sqlite3_exec() callback returns non-zero, the sqlite3_exec()
-** routine returns SQLITE_ABORT without invoking the callback again and
-** without running any subsequent SQL statements.
-**
-** ^The 2nd argument to the sqlite3_exec() callback function is the
-** number of columns in the result. ^The 3rd argument to the sqlite3_exec()
-** callback is an array of pointers to strings obtained as if from
-** [sqlite3_column_text()], one for each column. ^If an element of a
-** result row is NULL then the corresponding string pointer for the
-** sqlite3_exec() callback is a NULL pointer. ^The 4th argument to the
-** sqlite3_exec() callback is an array of pointers to strings where each
-** entry represents the name of corresponding result column as obtained
-** from [sqlite3_column_name()].
-**
-** ^If the 2nd parameter to sqlite3_exec() is a NULL pointer, a pointer
-** to an empty string, or a pointer that contains only whitespace and/or
-** SQL comments, then no SQL statements are evaluated and the database
-** is not changed.
-**
-** Restrictions:
-**
-**
-** The application must ensure that the 1st parameter to sqlite3_exec()
-** is a valid and open [database connection].
-** The application must not close the [database connection] specified by
-** the 1st parameter to sqlite3_exec() while sqlite3_exec() is running.
-** The application must not modify the SQL statement text passed into
-** the 2nd parameter of sqlite3_exec() while sqlite3_exec() is running.
-**
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_exec(
- sqlite3*, /* An open database */
- const char *sql, /* SQL to be evaluated */
- int (*callback)(void*,int,char**,char**), /* Callback function */
- void *, /* 1st argument to callback */
- char **errmsg /* Error msg written here */
-);
-
-/*
-** CAPI3REF: Result Codes
-** KEYWORDS: {result code definitions}
-**
-** Many SQLite functions return an integer result code from the set shown
-** here in order to indicate success or failure.
-**
-** New error codes may be added in future versions of SQLite.
-**
-** See also: [extended result code definitions]
-*/
-#define SQLITE_OK 0 /* Successful result */
-/* beginning-of-error-codes */
-#define SQLITE_ERROR 1 /* SQL error or missing database */
-#define SQLITE_INTERNAL 2 /* Internal logic error in SQLite */
-#define SQLITE_PERM 3 /* Access permission denied */
-#define SQLITE_ABORT 4 /* Callback routine requested an abort */
-#define SQLITE_BUSY 5 /* The database file is locked */
-#define SQLITE_LOCKED 6 /* A table in the database is locked */
-#define SQLITE_NOMEM 7 /* A malloc() failed */
-#define SQLITE_READONLY 8 /* Attempt to write a readonly database */
-#define SQLITE_INTERRUPT 9 /* Operation terminated by sqlite3_interrupt()*/
-#define SQLITE_IOERR 10 /* Some kind of disk I/O error occurred */
-#define SQLITE_CORRUPT 11 /* The database disk image is malformed */
-#define SQLITE_NOTFOUND 12 /* Unknown opcode in sqlite3_file_control() */
-#define SQLITE_FULL 13 /* Insertion failed because database is full */
-#define SQLITE_CANTOPEN 14 /* Unable to open the database file */
-#define SQLITE_PROTOCOL 15 /* Database lock protocol error */
-#define SQLITE_EMPTY 16 /* Database is empty */
-#define SQLITE_SCHEMA 17 /* The database schema changed */
-#define SQLITE_TOOBIG 18 /* String or BLOB exceeds size limit */
-#define SQLITE_CONSTRAINT 19 /* Abort due to constraint violation */
-#define SQLITE_MISMATCH 20 /* Data type mismatch */
-#define SQLITE_MISUSE 21 /* Library used incorrectly */
-#define SQLITE_NOLFS 22 /* Uses OS features not supported on host */
-#define SQLITE_AUTH 23 /* Authorization denied */
-#define SQLITE_FORMAT 24 /* Auxiliary database format error */
-#define SQLITE_RANGE 25 /* 2nd parameter to sqlite3_bind out of range */
-#define SQLITE_NOTADB 26 /* File opened that is not a database file */
-#define SQLITE_NOTICE 27 /* Notifications from sqlite3_log() */
-#define SQLITE_WARNING 28 /* Warnings from sqlite3_log() */
-#define SQLITE_ROW 100 /* sqlite3_step() has another row ready */
-#define SQLITE_DONE 101 /* sqlite3_step() has finished executing */
-/* end-of-error-codes */
-
-/*
-** CAPI3REF: Extended Result Codes
-** KEYWORDS: {extended result code definitions}
-**
-** In its default configuration, SQLite API routines return one of 30 integer
-** [result codes]. However, experience has shown that many of
-** these result codes are too coarse-grained. They do not provide as
-** much information about problems as programmers might like. In an effort to
-** address this, newer versions of SQLite (version 3.3.8 and later) include
-** support for additional result codes that provide more detailed information
-** about errors. These [extended result codes] are enabled or disabled
-** on a per database connection basis using the
-** [sqlite3_extended_result_codes()] API. Or, the extended code for
-** the most recent error can be obtained using
-** [sqlite3_extended_errcode()].
-*/
-#define SQLITE_IOERR_READ (SQLITE_IOERR | (1<<8))
-#define SQLITE_IOERR_SHORT_READ (SQLITE_IOERR | (2<<8))
-#define SQLITE_IOERR_WRITE (SQLITE_IOERR | (3<<8))
-#define SQLITE_IOERR_FSYNC (SQLITE_IOERR | (4<<8))
-#define SQLITE_IOERR_DIR_FSYNC (SQLITE_IOERR | (5<<8))
-#define SQLITE_IOERR_TRUNCATE (SQLITE_IOERR | (6<<8))
-#define SQLITE_IOERR_FSTAT (SQLITE_IOERR | (7<<8))
-#define SQLITE_IOERR_UNLOCK (SQLITE_IOERR | (8<<8))
-#define SQLITE_IOERR_RDLOCK (SQLITE_IOERR | (9<<8))
-#define SQLITE_IOERR_DELETE (SQLITE_IOERR | (10<<8))
-#define SQLITE_IOERR_BLOCKED (SQLITE_IOERR | (11<<8))
-#define SQLITE_IOERR_NOMEM (SQLITE_IOERR | (12<<8))
-#define SQLITE_IOERR_ACCESS (SQLITE_IOERR | (13<<8))
-#define SQLITE_IOERR_CHECKRESERVEDLOCK (SQLITE_IOERR | (14<<8))
-#define SQLITE_IOERR_LOCK (SQLITE_IOERR | (15<<8))
-#define SQLITE_IOERR_CLOSE (SQLITE_IOERR | (16<<8))
-#define SQLITE_IOERR_DIR_CLOSE (SQLITE_IOERR | (17<<8))
-#define SQLITE_IOERR_SHMOPEN (SQLITE_IOERR | (18<<8))
-#define SQLITE_IOERR_SHMSIZE (SQLITE_IOERR | (19<<8))
-#define SQLITE_IOERR_SHMLOCK (SQLITE_IOERR | (20<<8))
-#define SQLITE_IOERR_SHMMAP (SQLITE_IOERR | (21<<8))
-#define SQLITE_IOERR_SEEK (SQLITE_IOERR | (22<<8))
-#define SQLITE_IOERR_DELETE_NOENT (SQLITE_IOERR | (23<<8))
-#define SQLITE_IOERR_MMAP (SQLITE_IOERR | (24<<8))
-#define SQLITE_IOERR_GETTEMPPATH (SQLITE_IOERR | (25<<8))
-#define SQLITE_IOERR_CONVPATH (SQLITE_IOERR | (26<<8))
-#define SQLITE_IOERR_VNODE (SQLITE_IOERR | (27<<8))
-#define SQLITE_IOERR_AUTH (SQLITE_IOERR | (28<<8))
-#define SQLITE_LOCKED_SHAREDCACHE (SQLITE_LOCKED | (1<<8))
-#define SQLITE_BUSY_RECOVERY (SQLITE_BUSY | (1<<8))
-#define SQLITE_BUSY_SNAPSHOT (SQLITE_BUSY | (2<<8))
-#define SQLITE_CANTOPEN_NOTEMPDIR (SQLITE_CANTOPEN | (1<<8))
-#define SQLITE_CANTOPEN_ISDIR (SQLITE_CANTOPEN | (2<<8))
-#define SQLITE_CANTOPEN_FULLPATH (SQLITE_CANTOPEN | (3<<8))
-#define SQLITE_CANTOPEN_CONVPATH (SQLITE_CANTOPEN | (4<<8))
-#define SQLITE_CORRUPT_VTAB (SQLITE_CORRUPT | (1<<8))
-#define SQLITE_READONLY_RECOVERY (SQLITE_READONLY | (1<<8))
-#define SQLITE_READONLY_CANTLOCK (SQLITE_READONLY | (2<<8))
-#define SQLITE_READONLY_ROLLBACK (SQLITE_READONLY | (3<<8))
-#define SQLITE_READONLY_DBMOVED (SQLITE_READONLY | (4<<8))
-#define SQLITE_ABORT_ROLLBACK (SQLITE_ABORT | (2<<8))
-#define SQLITE_CONSTRAINT_CHECK (SQLITE_CONSTRAINT | (1<<8))
-#define SQLITE_CONSTRAINT_COMMITHOOK (SQLITE_CONSTRAINT | (2<<8))
-#define SQLITE_CONSTRAINT_FOREIGNKEY (SQLITE_CONSTRAINT | (3<<8))
-#define SQLITE_CONSTRAINT_FUNCTION (SQLITE_CONSTRAINT | (4<<8))
-#define SQLITE_CONSTRAINT_NOTNULL (SQLITE_CONSTRAINT | (5<<8))
-#define SQLITE_CONSTRAINT_PRIMARYKEY (SQLITE_CONSTRAINT | (6<<8))
-#define SQLITE_CONSTRAINT_TRIGGER (SQLITE_CONSTRAINT | (7<<8))
-#define SQLITE_CONSTRAINT_UNIQUE (SQLITE_CONSTRAINT | (8<<8))
-#define SQLITE_CONSTRAINT_VTAB (SQLITE_CONSTRAINT | (9<<8))
-#define SQLITE_CONSTRAINT_ROWID (SQLITE_CONSTRAINT |(10<<8))
-#define SQLITE_NOTICE_RECOVER_WAL (SQLITE_NOTICE | (1<<8))
-#define SQLITE_NOTICE_RECOVER_ROLLBACK (SQLITE_NOTICE | (2<<8))
-#define SQLITE_WARNING_AUTOINDEX (SQLITE_WARNING | (1<<8))
-#define SQLITE_AUTH_USER (SQLITE_AUTH | (1<<8))
-#define SQLITE_OK_LOAD_PERMANENTLY (SQLITE_OK | (1<<8))
-
-/*
-** CAPI3REF: Flags For File Open Operations
-**
-** These bit values are intended for use in the
-** 3rd parameter to the [sqlite3_open_v2()] interface and
-** in the 4th parameter to the [sqlite3_vfs.xOpen] method.
-*/
-#define SQLITE_OPEN_READONLY 0x00000001 /* Ok for sqlite3_open_v2() */
-#define SQLITE_OPEN_READWRITE 0x00000002 /* Ok for sqlite3_open_v2() */
-#define SQLITE_OPEN_CREATE 0x00000004 /* Ok for sqlite3_open_v2() */
-#define SQLITE_OPEN_DELETEONCLOSE 0x00000008 /* VFS only */
-#define SQLITE_OPEN_EXCLUSIVE 0x00000010 /* VFS only */
-#define SQLITE_OPEN_AUTOPROXY 0x00000020 /* VFS only */
-#define SQLITE_OPEN_URI 0x00000040 /* Ok for sqlite3_open_v2() */
-#define SQLITE_OPEN_MEMORY 0x00000080 /* Ok for sqlite3_open_v2() */
-#define SQLITE_OPEN_MAIN_DB 0x00000100 /* VFS only */
-#define SQLITE_OPEN_TEMP_DB 0x00000200 /* VFS only */
-#define SQLITE_OPEN_TRANSIENT_DB 0x00000400 /* VFS only */
-#define SQLITE_OPEN_MAIN_JOURNAL 0x00000800 /* VFS only */
-#define SQLITE_OPEN_TEMP_JOURNAL 0x00001000 /* VFS only */
-#define SQLITE_OPEN_SUBJOURNAL 0x00002000 /* VFS only */
-#define SQLITE_OPEN_MASTER_JOURNAL 0x00004000 /* VFS only */
-#define SQLITE_OPEN_NOMUTEX 0x00008000 /* Ok for sqlite3_open_v2() */
-#define SQLITE_OPEN_FULLMUTEX 0x00010000 /* Ok for sqlite3_open_v2() */
-#define SQLITE_OPEN_SHAREDCACHE 0x00020000 /* Ok for sqlite3_open_v2() */
-#define SQLITE_OPEN_PRIVATECACHE 0x00040000 /* Ok for sqlite3_open_v2() */
-#define SQLITE_OPEN_WAL 0x00080000 /* VFS only */
-
-/* Reserved: 0x00F00000 */
-
-/*
-** CAPI3REF: Device Characteristics
-**
-** The xDeviceCharacteristics method of the [sqlite3_io_methods]
-** object returns an integer which is a vector of these
-** bit values expressing I/O characteristics of the mass storage
-** device that holds the file that the [sqlite3_io_methods]
-** refers to.
-**
-** The SQLITE_IOCAP_ATOMIC property means that all writes of
-** any size are atomic. The SQLITE_IOCAP_ATOMICnnn values
-** mean that writes of blocks that are nnn bytes in size and
-** are aligned to an address which is an integer multiple of
-** nnn are atomic. The SQLITE_IOCAP_SAFE_APPEND value means
-** that when data is appended to a file, the data is appended
-** first then the size of the file is extended, never the other
-** way around. The SQLITE_IOCAP_SEQUENTIAL property means that
-** information is written to disk in the same order as calls
-** to xWrite(). The SQLITE_IOCAP_POWERSAFE_OVERWRITE property means that
-** after reboot following a crash or power loss, the only bytes in a
-** file that were written at the application level might have changed
-** and that adjacent bytes, even bytes within the same sector are
-** guaranteed to be unchanged. The SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN
-** flag indicate that a file cannot be deleted when open. The
-** SQLITE_IOCAP_IMMUTABLE flag indicates that the file is on
-** read-only media and cannot be changed even by processes with
-** elevated privileges.
-*/
-#define SQLITE_IOCAP_ATOMIC 0x00000001
-#define SQLITE_IOCAP_ATOMIC512 0x00000002
-#define SQLITE_IOCAP_ATOMIC1K 0x00000004
-#define SQLITE_IOCAP_ATOMIC2K 0x00000008
-#define SQLITE_IOCAP_ATOMIC4K 0x00000010
-#define SQLITE_IOCAP_ATOMIC8K 0x00000020
-#define SQLITE_IOCAP_ATOMIC16K 0x00000040
-#define SQLITE_IOCAP_ATOMIC32K 0x00000080
-#define SQLITE_IOCAP_ATOMIC64K 0x00000100
-#define SQLITE_IOCAP_SAFE_APPEND 0x00000200
-#define SQLITE_IOCAP_SEQUENTIAL 0x00000400
-#define SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN 0x00000800
-#define SQLITE_IOCAP_POWERSAFE_OVERWRITE 0x00001000
-#define SQLITE_IOCAP_IMMUTABLE 0x00002000
-
-/*
-** CAPI3REF: File Locking Levels
-**
-** SQLite uses one of these integer values as the second
-** argument to calls it makes to the xLock() and xUnlock() methods
-** of an [sqlite3_io_methods] object.
-*/
-#define SQLITE_LOCK_NONE 0
-#define SQLITE_LOCK_SHARED 1
-#define SQLITE_LOCK_RESERVED 2
-#define SQLITE_LOCK_PENDING 3
-#define SQLITE_LOCK_EXCLUSIVE 4
-
-/*
-** CAPI3REF: Synchronization Type Flags
-**
-** When SQLite invokes the xSync() method of an
-** [sqlite3_io_methods] object it uses a combination of
-** these integer values as the second argument.
-**
-** When the SQLITE_SYNC_DATAONLY flag is used, it means that the
-** sync operation only needs to flush data to mass storage. Inode
-** information need not be flushed. If the lower four bits of the flag
-** equal SQLITE_SYNC_NORMAL, that means to use normal fsync() semantics.
-** If the lower four bits equal SQLITE_SYNC_FULL, that means
-** to use Mac OS X style fullsync instead of fsync().
-**
-** Do not confuse the SQLITE_SYNC_NORMAL and SQLITE_SYNC_FULL flags
-** with the [PRAGMA synchronous]=NORMAL and [PRAGMA synchronous]=FULL
-** settings. The [synchronous pragma] determines when calls to the
-** xSync VFS method occur and applies uniformly across all platforms.
-** The SQLITE_SYNC_NORMAL and SQLITE_SYNC_FULL flags determine how
-** energetic or rigorous or forceful the sync operations are and
-** only make a difference on Mac OSX for the default SQLite code.
-** (Third-party VFS implementations might also make the distinction
-** between SQLITE_SYNC_NORMAL and SQLITE_SYNC_FULL, but among the
-** operating systems natively supported by SQLite, only Mac OSX
-** cares about the difference.)
-*/
-#define SQLITE_SYNC_NORMAL 0x00002
-#define SQLITE_SYNC_FULL 0x00003
-#define SQLITE_SYNC_DATAONLY 0x00010
-
-/*
-** CAPI3REF: OS Interface Open File Handle
-**
-** An [sqlite3_file] object represents an open file in the
-** [sqlite3_vfs | OS interface layer]. Individual OS interface
-** implementations will
-** want to subclass this object by appending additional fields
-** for their own use. The pMethods entry is a pointer to an
-** [sqlite3_io_methods] object that defines methods for performing
-** I/O operations on the open file.
-*/
-typedef struct sqlite3_file sqlite3_file;
-struct sqlite3_file {
- const struct sqlite3_io_methods *pMethods; /* Methods for an open file */
-};
-
-/*
-** CAPI3REF: OS Interface File Virtual Methods Object
-**
-** Every file opened by the [sqlite3_vfs.xOpen] method populates an
-** [sqlite3_file] object (or, more commonly, a subclass of the
-** [sqlite3_file] object) with a pointer to an instance of this object.
-** This object defines the methods used to perform various operations
-** against the open file represented by the [sqlite3_file] object.
-**
-** If the [sqlite3_vfs.xOpen] method sets the sqlite3_file.pMethods element
-** to a non-NULL pointer, then the sqlite3_io_methods.xClose method
-** may be invoked even if the [sqlite3_vfs.xOpen] reported that it failed. The
-** only way to prevent a call to xClose following a failed [sqlite3_vfs.xOpen]
-** is for the [sqlite3_vfs.xOpen] to set the sqlite3_file.pMethods element
-** to NULL.
-**
-** The flags argument to xSync may be one of [SQLITE_SYNC_NORMAL] or
-** [SQLITE_SYNC_FULL]. The first choice is the normal fsync().
-** The second choice is a Mac OS X style fullsync. The [SQLITE_SYNC_DATAONLY]
-** flag may be ORed in to indicate that only the data of the file
-** and not its inode needs to be synced.
-**
-** The integer values to xLock() and xUnlock() are one of
-**
-** [SQLITE_LOCK_NONE],
-** [SQLITE_LOCK_SHARED],
-** [SQLITE_LOCK_RESERVED],
-** [SQLITE_LOCK_PENDING], or
-** [SQLITE_LOCK_EXCLUSIVE].
-**
-** xLock() increases the lock. xUnlock() decreases the lock.
-** The xCheckReservedLock() method checks whether any database connection,
-** either in this process or in some other process, is holding a RESERVED,
-** PENDING, or EXCLUSIVE lock on the file. It returns true
-** if such a lock exists and false otherwise.
-**
-** The xFileControl() method is a generic interface that allows custom
-** VFS implementations to directly control an open file using the
-** [sqlite3_file_control()] interface. The second "op" argument is an
-** integer opcode. The third argument is a generic pointer intended to
-** point to a structure that may contain arguments or space in which to
-** write return values. Potential uses for xFileControl() might be
-** functions to enable blocking locks with timeouts, to change the
-** locking strategy (for example to use dot-file locks), to inquire
-** about the status of a lock, or to break stale locks. The SQLite
-** core reserves all opcodes less than 100 for its own use.
-** A [file control opcodes | list of opcodes] less than 100 is available.
-** Applications that define a custom xFileControl method should use opcodes
-** greater than 100 to avoid conflicts. VFS implementations should
-** return [SQLITE_NOTFOUND] for file control opcodes that they do not
-** recognize.
-**
-** The xSectorSize() method returns the sector size of the
-** device that underlies the file. The sector size is the
-** minimum write that can be performed without disturbing
-** other bytes in the file. The xDeviceCharacteristics()
-** method returns a bit vector describing behaviors of the
-** underlying device:
-**
-**
-** [SQLITE_IOCAP_ATOMIC]
-** [SQLITE_IOCAP_ATOMIC512]
-** [SQLITE_IOCAP_ATOMIC1K]
-** [SQLITE_IOCAP_ATOMIC2K]
-** [SQLITE_IOCAP_ATOMIC4K]
-** [SQLITE_IOCAP_ATOMIC8K]
-** [SQLITE_IOCAP_ATOMIC16K]
-** [SQLITE_IOCAP_ATOMIC32K]
-** [SQLITE_IOCAP_ATOMIC64K]
-** [SQLITE_IOCAP_SAFE_APPEND]
-** [SQLITE_IOCAP_SEQUENTIAL]
-**
-**
-** The SQLITE_IOCAP_ATOMIC property means that all writes of
-** any size are atomic. The SQLITE_IOCAP_ATOMICnnn values
-** mean that writes of blocks that are nnn bytes in size and
-** are aligned to an address which is an integer multiple of
-** nnn are atomic. The SQLITE_IOCAP_SAFE_APPEND value means
-** that when data is appended to a file, the data is appended
-** first then the size of the file is extended, never the other
-** way around. The SQLITE_IOCAP_SEQUENTIAL property means that
-** information is written to disk in the same order as calls
-** to xWrite().
-**
-** If xRead() returns SQLITE_IOERR_SHORT_READ it must also fill
-** in the unread portions of the buffer with zeros. A VFS that
-** fails to zero-fill short reads might seem to work. However,
-** failure to zero-fill short reads will eventually lead to
-** database corruption.
-*/
-typedef struct sqlite3_io_methods sqlite3_io_methods;
-struct sqlite3_io_methods {
- int iVersion;
- int (*xClose)(sqlite3_file*);
- int (*xRead)(sqlite3_file*, void*, int iAmt, sqlite3_int64 iOfst);
- int (*xWrite)(sqlite3_file*, const void*, int iAmt, sqlite3_int64 iOfst);
- int (*xTruncate)(sqlite3_file*, sqlite3_int64 size);
- int (*xSync)(sqlite3_file*, int flags);
- int (*xFileSize)(sqlite3_file*, sqlite3_int64 *pSize);
- int (*xLock)(sqlite3_file*, int);
- int (*xUnlock)(sqlite3_file*, int);
- int (*xCheckReservedLock)(sqlite3_file*, int *pResOut);
- int (*xFileControl)(sqlite3_file*, int op, void *pArg);
- int (*xSectorSize)(sqlite3_file*);
- int (*xDeviceCharacteristics)(sqlite3_file*);
- /* Methods above are valid for version 1 */
- int (*xShmMap)(sqlite3_file*, int iPg, int pgsz, int, void volatile**);
- int (*xShmLock)(sqlite3_file*, int offset, int n, int flags);
- void (*xShmBarrier)(sqlite3_file*);
- int (*xShmUnmap)(sqlite3_file*, int deleteFlag);
- /* Methods above are valid for version 2 */
- int (*xFetch)(sqlite3_file*, sqlite3_int64 iOfst, int iAmt, void **pp);
- int (*xUnfetch)(sqlite3_file*, sqlite3_int64 iOfst, void *p);
- /* Methods above are valid for version 3 */
- /* Additional methods may be added in future releases */
-};
-
-/*
-** CAPI3REF: Standard File Control Opcodes
-** KEYWORDS: {file control opcodes} {file control opcode}
-**
-** These integer constants are opcodes for the xFileControl method
-** of the [sqlite3_io_methods] object and for the [sqlite3_file_control()]
-** interface.
-**
-**
-** [[SQLITE_FCNTL_LOCKSTATE]]
-** The [SQLITE_FCNTL_LOCKSTATE] opcode is used for debugging. This
-** opcode causes the xFileControl method to write the current state of
-** the lock (one of [SQLITE_LOCK_NONE], [SQLITE_LOCK_SHARED],
-** [SQLITE_LOCK_RESERVED], [SQLITE_LOCK_PENDING], or [SQLITE_LOCK_EXCLUSIVE])
-** into an integer that the pArg argument points to. This capability
-** is used during testing and is only available when the SQLITE_TEST
-** compile-time option is used.
-**
-** [[SQLITE_FCNTL_SIZE_HINT]]
-** The [SQLITE_FCNTL_SIZE_HINT] opcode is used by SQLite to give the VFS
-** layer a hint of how large the database file will grow to be during the
-** current transaction. This hint is not guaranteed to be accurate but it
-** is often close. The underlying VFS might choose to preallocate database
-** file space based on this hint in order to help writes to the database
-** file run faster.
-**
-** [[SQLITE_FCNTL_CHUNK_SIZE]]
-** The [SQLITE_FCNTL_CHUNK_SIZE] opcode is used to request that the VFS
-** extends and truncates the database file in chunks of a size specified
-** by the user. The fourth argument to [sqlite3_file_control()] should
-** point to an integer (type int) containing the new chunk-size to use
-** for the nominated database. Allocating database file space in large
-** chunks (say 1MB at a time), may reduce file-system fragmentation and
-** improve performance on some systems.
-**
-** [[SQLITE_FCNTL_FILE_POINTER]]
-** The [SQLITE_FCNTL_FILE_POINTER] opcode is used to obtain a pointer
-** to the [sqlite3_file] object associated with a particular database
-** connection. See also [SQLITE_FCNTL_JOURNAL_POINTER].
-**
-** [[SQLITE_FCNTL_JOURNAL_POINTER]]
-** The [SQLITE_FCNTL_JOURNAL_POINTER] opcode is used to obtain a pointer
-** to the [sqlite3_file] object associated with the journal file (either
-** the [rollback journal] or the [write-ahead log]) for a particular database
-** connection. See also [SQLITE_FCNTL_FILE_POINTER].
-**
-** [[SQLITE_FCNTL_SYNC_OMITTED]]
-** No longer in use.
-**
-** [[SQLITE_FCNTL_SYNC]]
-** The [SQLITE_FCNTL_SYNC] opcode is generated internally by SQLite and
-** sent to the VFS immediately before the xSync method is invoked on a
-** database file descriptor. Or, if the xSync method is not invoked
-** because the user has configured SQLite with
-** [PRAGMA synchronous | PRAGMA synchronous=OFF] it is invoked in place
-** of the xSync method. In most cases, the pointer argument passed with
-** this file-control is NULL. However, if the database file is being synced
-** as part of a multi-database commit, the argument points to a nul-terminated
-** string containing the transactions master-journal file name. VFSes that
-** do not need this signal should silently ignore this opcode. Applications
-** should not call [sqlite3_file_control()] with this opcode as doing so may
-** disrupt the operation of the specialized VFSes that do require it.
-**
-** [[SQLITE_FCNTL_COMMIT_PHASETWO]]
-** The [SQLITE_FCNTL_COMMIT_PHASETWO] opcode is generated internally by SQLite
-** and sent to the VFS after a transaction has been committed immediately
-** but before the database is unlocked. VFSes that do not need this signal
-** should silently ignore this opcode. Applications should not call
-** [sqlite3_file_control()] with this opcode as doing so may disrupt the
-** operation of the specialized VFSes that do require it.
-**
-** [[SQLITE_FCNTL_WIN32_AV_RETRY]]
-** ^The [SQLITE_FCNTL_WIN32_AV_RETRY] opcode is used to configure automatic
-** retry counts and intervals for certain disk I/O operations for the
-** windows [VFS] in order to provide robustness in the presence of
-** anti-virus programs. By default, the windows VFS will retry file read,
-** file write, and file delete operations up to 10 times, with a delay
-** of 25 milliseconds before the first retry and with the delay increasing
-** by an additional 25 milliseconds with each subsequent retry. This
-** opcode allows these two values (10 retries and 25 milliseconds of delay)
-** to be adjusted. The values are changed for all database connections
-** within the same process. The argument is a pointer to an array of two
-** integers where the first integer i the new retry count and the second
-** integer is the delay. If either integer is negative, then the setting
-** is not changed but instead the prior value of that setting is written
-** into the array entry, allowing the current retry settings to be
-** interrogated. The zDbName parameter is ignored.
-**
-** [[SQLITE_FCNTL_PERSIST_WAL]]
-** ^The [SQLITE_FCNTL_PERSIST_WAL] opcode is used to set or query the
-** persistent [WAL | Write Ahead Log] setting. By default, the auxiliary
-** write ahead log and shared memory files used for transaction control
-** are automatically deleted when the latest connection to the database
-** closes. Setting persistent WAL mode causes those files to persist after
-** close. Persisting the files is useful when other processes that do not
-** have write permission on the directory containing the database file want
-** to read the database file, as the WAL and shared memory files must exist
-** in order for the database to be readable. The fourth parameter to
-** [sqlite3_file_control()] for this opcode should be a pointer to an integer.
-** That integer is 0 to disable persistent WAL mode or 1 to enable persistent
-** WAL mode. If the integer is -1, then it is overwritten with the current
-** WAL persistence setting.
-**
-** [[SQLITE_FCNTL_POWERSAFE_OVERWRITE]]
-** ^The [SQLITE_FCNTL_POWERSAFE_OVERWRITE] opcode is used to set or query the
-** persistent "powersafe-overwrite" or "PSOW" setting. The PSOW setting
-** determines the [SQLITE_IOCAP_POWERSAFE_OVERWRITE] bit of the
-** xDeviceCharacteristics methods. The fourth parameter to
-** [sqlite3_file_control()] for this opcode should be a pointer to an integer.
-** That integer is 0 to disable zero-damage mode or 1 to enable zero-damage
-** mode. If the integer is -1, then it is overwritten with the current
-** zero-damage mode setting.
-**
-** [[SQLITE_FCNTL_OVERWRITE]]
-** ^The [SQLITE_FCNTL_OVERWRITE] opcode is invoked by SQLite after opening
-** a write transaction to indicate that, unless it is rolled back for some
-** reason, the entire database file will be overwritten by the current
-** transaction. This is used by VACUUM operations.
-**
-** [[SQLITE_FCNTL_VFSNAME]]
-** ^The [SQLITE_FCNTL_VFSNAME] opcode can be used to obtain the names of
-** all [VFSes] in the VFS stack. The names are of all VFS shims and the
-** final bottom-level VFS are written into memory obtained from
-** [sqlite3_malloc()] and the result is stored in the char* variable
-** that the fourth parameter of [sqlite3_file_control()] points to.
-** The caller is responsible for freeing the memory when done. As with
-** all file-control actions, there is no guarantee that this will actually
-** do anything. Callers should initialize the char* variable to a NULL
-** pointer in case this file-control is not implemented. This file-control
-** is intended for diagnostic use only.
-**
-** [[SQLITE_FCNTL_VFS_POINTER]]
-** ^The [SQLITE_FCNTL_VFS_POINTER] opcode finds a pointer to the top-level
-** [VFSes] currently in use. ^(The argument X in
-** sqlite3_file_control(db,SQLITE_FCNTL_VFS_POINTER,X) must be
-** of type "[sqlite3_vfs] **". This opcodes will set *X
-** to a pointer to the top-level VFS.)^
-** ^When there are multiple VFS shims in the stack, this opcode finds the
-** upper-most shim only.
-**
-** [[SQLITE_FCNTL_PRAGMA]]
-** ^Whenever a [PRAGMA] statement is parsed, an [SQLITE_FCNTL_PRAGMA]
-** file control is sent to the open [sqlite3_file] object corresponding
-** to the database file to which the pragma statement refers. ^The argument
-** to the [SQLITE_FCNTL_PRAGMA] file control is an array of
-** pointers to strings (char**) in which the second element of the array
-** is the name of the pragma and the third element is the argument to the
-** pragma or NULL if the pragma has no argument. ^The handler for an
-** [SQLITE_FCNTL_PRAGMA] file control can optionally make the first element
-** of the char** argument point to a string obtained from [sqlite3_mprintf()]
-** or the equivalent and that string will become the result of the pragma or
-** the error message if the pragma fails. ^If the
-** [SQLITE_FCNTL_PRAGMA] file control returns [SQLITE_NOTFOUND], then normal
-** [PRAGMA] processing continues. ^If the [SQLITE_FCNTL_PRAGMA]
-** file control returns [SQLITE_OK], then the parser assumes that the
-** VFS has handled the PRAGMA itself and the parser generates a no-op
-** prepared statement if result string is NULL, or that returns a copy
-** of the result string if the string is non-NULL.
-** ^If the [SQLITE_FCNTL_PRAGMA] file control returns
-** any result code other than [SQLITE_OK] or [SQLITE_NOTFOUND], that means
-** that the VFS encountered an error while handling the [PRAGMA] and the
-** compilation of the PRAGMA fails with an error. ^The [SQLITE_FCNTL_PRAGMA]
-** file control occurs at the beginning of pragma statement analysis and so
-** it is able to override built-in [PRAGMA] statements.
-**
-** [[SQLITE_FCNTL_BUSYHANDLER]]
-** ^The [SQLITE_FCNTL_BUSYHANDLER]
-** file-control may be invoked by SQLite on the database file handle
-** shortly after it is opened in order to provide a custom VFS with access
-** to the connections busy-handler callback. The argument is of type (void **)
-** - an array of two (void *) values. The first (void *) actually points
-** to a function of type (int (*)(void *)). In order to invoke the connections
-** busy-handler, this function should be invoked with the second (void *) in
-** the array as the only argument. If it returns non-zero, then the operation
-** should be retried. If it returns zero, the custom VFS should abandon the
-** current operation.
-**
-** [[SQLITE_FCNTL_TEMPFILENAME]]
-** ^Application can invoke the [SQLITE_FCNTL_TEMPFILENAME] file-control
-** to have SQLite generate a
-** temporary filename using the same algorithm that is followed to generate
-** temporary filenames for TEMP tables and other internal uses. The
-** argument should be a char** which will be filled with the filename
-** written into memory obtained from [sqlite3_malloc()]. The caller should
-** invoke [sqlite3_free()] on the result to avoid a memory leak.
-**
-** [[SQLITE_FCNTL_MMAP_SIZE]]
-** The [SQLITE_FCNTL_MMAP_SIZE] file control is used to query or set the
-** maximum number of bytes that will be used for memory-mapped I/O.
-** The argument is a pointer to a value of type sqlite3_int64 that
-** is an advisory maximum number of bytes in the file to memory map. The
-** pointer is overwritten with the old value. The limit is not changed if
-** the value originally pointed to is negative, and so the current limit
-** can be queried by passing in a pointer to a negative number. This
-** file-control is used internally to implement [PRAGMA mmap_size].
-**
-** [[SQLITE_FCNTL_TRACE]]
-** The [SQLITE_FCNTL_TRACE] file control provides advisory information
-** to the VFS about what the higher layers of the SQLite stack are doing.
-** This file control is used by some VFS activity tracing [shims].
-** The argument is a zero-terminated string. Higher layers in the
-** SQLite stack may generate instances of this file control if
-** the [SQLITE_USE_FCNTL_TRACE] compile-time option is enabled.
-**
-** [[SQLITE_FCNTL_HAS_MOVED]]
-** The [SQLITE_FCNTL_HAS_MOVED] file control interprets its argument as a
-** pointer to an integer and it writes a boolean into that integer depending
-** on whether or not the file has been renamed, moved, or deleted since it
-** was first opened.
-**
-** [[SQLITE_FCNTL_WIN32_SET_HANDLE]]
-** The [SQLITE_FCNTL_WIN32_SET_HANDLE] opcode is used for debugging. This
-** opcode causes the xFileControl method to swap the file handle with the one
-** pointed to by the pArg argument. This capability is used during testing
-** and only needs to be supported when SQLITE_TEST is defined.
-**
-** [[SQLITE_FCNTL_WAL_BLOCK]]
-** The [SQLITE_FCNTL_WAL_BLOCK] is a signal to the VFS layer that it might
-** be advantageous to block on the next WAL lock if the lock is not immediately
-** available. The WAL subsystem issues this signal during rare
-** circumstances in order to fix a problem with priority inversion.
-** Applications should not use this file-control.
-**
-** [[SQLITE_FCNTL_ZIPVFS]]
-** The [SQLITE_FCNTL_ZIPVFS] opcode is implemented by zipvfs only. All other
-** VFS should return SQLITE_NOTFOUND for this opcode.
-**
-** [[SQLITE_FCNTL_RBU]]
-** The [SQLITE_FCNTL_RBU] opcode is implemented by the special VFS used by
-** the RBU extension only. All other VFS should return SQLITE_NOTFOUND for
-** this opcode.
-**
-*/
-#define SQLITE_FCNTL_LOCKSTATE 1
-#define SQLITE_FCNTL_GET_LOCKPROXYFILE 2
-#define SQLITE_FCNTL_SET_LOCKPROXYFILE 3
-#define SQLITE_FCNTL_LAST_ERRNO 4
-#define SQLITE_FCNTL_SIZE_HINT 5
-#define SQLITE_FCNTL_CHUNK_SIZE 6
-#define SQLITE_FCNTL_FILE_POINTER 7
-#define SQLITE_FCNTL_SYNC_OMITTED 8
-#define SQLITE_FCNTL_WIN32_AV_RETRY 9
-#define SQLITE_FCNTL_PERSIST_WAL 10
-#define SQLITE_FCNTL_OVERWRITE 11
-#define SQLITE_FCNTL_VFSNAME 12
-#define SQLITE_FCNTL_POWERSAFE_OVERWRITE 13
-#define SQLITE_FCNTL_PRAGMA 14
-#define SQLITE_FCNTL_BUSYHANDLER 15
-#define SQLITE_FCNTL_TEMPFILENAME 16
-#define SQLITE_FCNTL_MMAP_SIZE 18
-#define SQLITE_FCNTL_TRACE 19
-#define SQLITE_FCNTL_HAS_MOVED 20
-#define SQLITE_FCNTL_SYNC 21
-#define SQLITE_FCNTL_COMMIT_PHASETWO 22
-#define SQLITE_FCNTL_WIN32_SET_HANDLE 23
-#define SQLITE_FCNTL_WAL_BLOCK 24
-#define SQLITE_FCNTL_ZIPVFS 25
-#define SQLITE_FCNTL_RBU 26
-#define SQLITE_FCNTL_VFS_POINTER 27
-#define SQLITE_FCNTL_JOURNAL_POINTER 28
-
-/* deprecated names */
-#define SQLITE_GET_LOCKPROXYFILE SQLITE_FCNTL_GET_LOCKPROXYFILE
-#define SQLITE_SET_LOCKPROXYFILE SQLITE_FCNTL_SET_LOCKPROXYFILE
-#define SQLITE_LAST_ERRNO SQLITE_FCNTL_LAST_ERRNO
-
-
-/*
-** CAPI3REF: Mutex Handle
-**
-** The mutex module within SQLite defines [sqlite3_mutex] to be an
-** abstract type for a mutex object. The SQLite core never looks
-** at the internal representation of an [sqlite3_mutex]. It only
-** deals with pointers to the [sqlite3_mutex] object.
-**
-** Mutexes are created using [sqlite3_mutex_alloc()].
-*/
-typedef struct sqlite3_mutex sqlite3_mutex;
-
-/*
-** CAPI3REF: Loadable Extension Thunk
-**
-** A pointer to the opaque sqlite3_api_routines structure is passed as
-** the third parameter to entry points of [loadable extensions]. This
-** structure must be typedefed in order to work around compiler warnings
-** on some platforms.
-*/
-typedef struct sqlite3_api_routines sqlite3_api_routines;
-
-/*
-** CAPI3REF: OS Interface Object
-**
-** An instance of the sqlite3_vfs object defines the interface between
-** the SQLite core and the underlying operating system. The "vfs"
-** in the name of the object stands for "virtual file system". See
-** the [VFS | VFS documentation] for further information.
-**
-** The value of the iVersion field is initially 1 but may be larger in
-** future versions of SQLite. Additional fields may be appended to this
-** object when the iVersion value is increased. Note that the structure
-** of the sqlite3_vfs object changes in the transaction between
-** SQLite version 3.5.9 and 3.6.0 and yet the iVersion field was not
-** modified.
-**
-** The szOsFile field is the size of the subclassed [sqlite3_file]
-** structure used by this VFS. mxPathname is the maximum length of
-** a pathname in this VFS.
-**
-** Registered sqlite3_vfs objects are kept on a linked list formed by
-** the pNext pointer. The [sqlite3_vfs_register()]
-** and [sqlite3_vfs_unregister()] interfaces manage this list
-** in a thread-safe way. The [sqlite3_vfs_find()] interface
-** searches the list. Neither the application code nor the VFS
-** implementation should use the pNext pointer.
-**
-** The pNext field is the only field in the sqlite3_vfs
-** structure that SQLite will ever modify. SQLite will only access
-** or modify this field while holding a particular static mutex.
-** The application should never modify anything within the sqlite3_vfs
-** object once the object has been registered.
-**
-** The zName field holds the name of the VFS module. The name must
-** be unique across all VFS modules.
-**
-** [[sqlite3_vfs.xOpen]]
-** ^SQLite guarantees that the zFilename parameter to xOpen
-** is either a NULL pointer or string obtained
-** from xFullPathname() with an optional suffix added.
-** ^If a suffix is added to the zFilename parameter, it will
-** consist of a single "-" character followed by no more than
-** 11 alphanumeric and/or "-" characters.
-** ^SQLite further guarantees that
-** the string will be valid and unchanged until xClose() is
-** called. Because of the previous sentence,
-** the [sqlite3_file] can safely store a pointer to the
-** filename if it needs to remember the filename for some reason.
-** If the zFilename parameter to xOpen is a NULL pointer then xOpen
-** must invent its own temporary name for the file. ^Whenever the
-** xFilename parameter is NULL it will also be the case that the
-** flags parameter will include [SQLITE_OPEN_DELETEONCLOSE].
-**
-** The flags argument to xOpen() includes all bits set in
-** the flags argument to [sqlite3_open_v2()]. Or if [sqlite3_open()]
-** or [sqlite3_open16()] is used, then flags includes at least
-** [SQLITE_OPEN_READWRITE] | [SQLITE_OPEN_CREATE].
-** If xOpen() opens a file read-only then it sets *pOutFlags to
-** include [SQLITE_OPEN_READONLY]. Other bits in *pOutFlags may be set.
-**
-** ^(SQLite will also add one of the following flags to the xOpen()
-** call, depending on the object being opened:
-**
-**
-** [SQLITE_OPEN_MAIN_DB]
-** [SQLITE_OPEN_MAIN_JOURNAL]
-** [SQLITE_OPEN_TEMP_DB]
-** [SQLITE_OPEN_TEMP_JOURNAL]
-** [SQLITE_OPEN_TRANSIENT_DB]
-** [SQLITE_OPEN_SUBJOURNAL]
-** [SQLITE_OPEN_MASTER_JOURNAL]
-** [SQLITE_OPEN_WAL]
-** )^
-**
-** The file I/O implementation can use the object type flags to
-** change the way it deals with files. For example, an application
-** that does not care about crash recovery or rollback might make
-** the open of a journal file a no-op. Writes to this journal would
-** also be no-ops, and any attempt to read the journal would return
-** SQLITE_IOERR. Or the implementation might recognize that a database
-** file will be doing page-aligned sector reads and writes in a random
-** order and set up its I/O subsystem accordingly.
-**
-** SQLite might also add one of the following flags to the xOpen method:
-**
-**
-** [SQLITE_OPEN_DELETEONCLOSE]
-** [SQLITE_OPEN_EXCLUSIVE]
-**
-**
-** The [SQLITE_OPEN_DELETEONCLOSE] flag means the file should be
-** deleted when it is closed. ^The [SQLITE_OPEN_DELETEONCLOSE]
-** will be set for TEMP databases and their journals, transient
-** databases, and subjournals.
-**
-** ^The [SQLITE_OPEN_EXCLUSIVE] flag is always used in conjunction
-** with the [SQLITE_OPEN_CREATE] flag, which are both directly
-** analogous to the O_EXCL and O_CREAT flags of the POSIX open()
-** API. The SQLITE_OPEN_EXCLUSIVE flag, when paired with the
-** SQLITE_OPEN_CREATE, is used to indicate that file should always
-** be created, and that it is an error if it already exists.
-** It is not used to indicate the file should be opened
-** for exclusive access.
-**
-** ^At least szOsFile bytes of memory are allocated by SQLite
-** to hold the [sqlite3_file] structure passed as the third
-** argument to xOpen. The xOpen method does not have to
-** allocate the structure; it should just fill it in. Note that
-** the xOpen method must set the sqlite3_file.pMethods to either
-** a valid [sqlite3_io_methods] object or to NULL. xOpen must do
-** this even if the open fails. SQLite expects that the sqlite3_file.pMethods
-** element will be valid after xOpen returns regardless of the success
-** or failure of the xOpen call.
-**
-** [[sqlite3_vfs.xAccess]]
-** ^The flags argument to xAccess() may be [SQLITE_ACCESS_EXISTS]
-** to test for the existence of a file, or [SQLITE_ACCESS_READWRITE] to
-** test whether a file is readable and writable, or [SQLITE_ACCESS_READ]
-** to test whether a file is at least readable. The file can be a
-** directory.
-**
-** ^SQLite will always allocate at least mxPathname+1 bytes for the
-** output buffer xFullPathname. The exact size of the output buffer
-** is also passed as a parameter to both methods. If the output buffer
-** is not large enough, [SQLITE_CANTOPEN] should be returned. Since this is
-** handled as a fatal error by SQLite, vfs implementations should endeavor
-** to prevent this by setting mxPathname to a sufficiently large value.
-**
-** The xRandomness(), xSleep(), xCurrentTime(), and xCurrentTimeInt64()
-** interfaces are not strictly a part of the filesystem, but they are
-** included in the VFS structure for completeness.
-** The xRandomness() function attempts to return nBytes bytes
-** of good-quality randomness into zOut. The return value is
-** the actual number of bytes of randomness obtained.
-** The xSleep() method causes the calling thread to sleep for at
-** least the number of microseconds given. ^The xCurrentTime()
-** method returns a Julian Day Number for the current date and time as
-** a floating point value.
-** ^The xCurrentTimeInt64() method returns, as an integer, the Julian
-** Day Number multiplied by 86400000 (the number of milliseconds in
-** a 24-hour day).
-** ^SQLite will use the xCurrentTimeInt64() method to get the current
-** date and time if that method is available (if iVersion is 2 or
-** greater and the function pointer is not NULL) and will fall back
-** to xCurrentTime() if xCurrentTimeInt64() is unavailable.
-**
-** ^The xSetSystemCall(), xGetSystemCall(), and xNestSystemCall() interfaces
-** are not used by the SQLite core. These optional interfaces are provided
-** by some VFSes to facilitate testing of the VFS code. By overriding
-** system calls with functions under its control, a test program can
-** simulate faults and error conditions that would otherwise be difficult
-** or impossible to induce. The set of system calls that can be overridden
-** varies from one VFS to another, and from one version of the same VFS to the
-** next. Applications that use these interfaces must be prepared for any
-** or all of these interfaces to be NULL or for their behavior to change
-** from one release to the next. Applications must not attempt to access
-** any of these methods if the iVersion of the VFS is less than 3.
-*/
-typedef struct sqlite3_vfs sqlite3_vfs;
-typedef void (*sqlite3_syscall_ptr)(void);
-struct sqlite3_vfs {
- int iVersion; /* Structure version number (currently 3) */
- int szOsFile; /* Size of subclassed sqlite3_file */
- int mxPathname; /* Maximum file pathname length */
- sqlite3_vfs *pNext; /* Next registered VFS */
- const char *zName; /* Name of this virtual file system */
- void *pAppData; /* Pointer to application-specific data */
- int (*xOpen)(sqlite3_vfs*, const char *zName, sqlite3_file*,
- int flags, int *pOutFlags);
- int (*xDelete)(sqlite3_vfs*, const char *zName, int syncDir);
- int (*xAccess)(sqlite3_vfs*, const char *zName, int flags, int *pResOut);
- int (*xFullPathname)(sqlite3_vfs*, const char *zName, int nOut, char *zOut);
- void *(*xDlOpen)(sqlite3_vfs*, const char *zFilename);
- void (*xDlError)(sqlite3_vfs*, int nByte, char *zErrMsg);
- void (*(*xDlSym)(sqlite3_vfs*,void*, const char *zSymbol))(void);
- void (*xDlClose)(sqlite3_vfs*, void*);
- int (*xRandomness)(sqlite3_vfs*, int nByte, char *zOut);
- int (*xSleep)(sqlite3_vfs*, int microseconds);
- int (*xCurrentTime)(sqlite3_vfs*, double*);
- int (*xGetLastError)(sqlite3_vfs*, int, char *);
- /*
- ** The methods above are in version 1 of the sqlite_vfs object
- ** definition. Those that follow are added in version 2 or later
- */
- int (*xCurrentTimeInt64)(sqlite3_vfs*, sqlite3_int64*);
- /*
- ** The methods above are in versions 1 and 2 of the sqlite_vfs object.
- ** Those below are for version 3 and greater.
- */
- int (*xSetSystemCall)(sqlite3_vfs*, const char *zName, sqlite3_syscall_ptr);
- sqlite3_syscall_ptr (*xGetSystemCall)(sqlite3_vfs*, const char *zName);
- const char *(*xNextSystemCall)(sqlite3_vfs*, const char *zName);
- /*
- ** The methods above are in versions 1 through 3 of the sqlite_vfs object.
- ** New fields may be appended in future versions. The iVersion
- ** value will increment whenever this happens.
- */
-};
-
-/*
-** CAPI3REF: Flags for the xAccess VFS method
-**
-** These integer constants can be used as the third parameter to
-** the xAccess method of an [sqlite3_vfs] object. They determine
-** what kind of permissions the xAccess method is looking for.
-** With SQLITE_ACCESS_EXISTS, the xAccess method
-** simply checks whether the file exists.
-** With SQLITE_ACCESS_READWRITE, the xAccess method
-** checks whether the named directory is both readable and writable
-** (in other words, if files can be added, removed, and renamed within
-** the directory).
-** The SQLITE_ACCESS_READWRITE constant is currently used only by the
-** [temp_store_directory pragma], though this could change in a future
-** release of SQLite.
-** With SQLITE_ACCESS_READ, the xAccess method
-** checks whether the file is readable. The SQLITE_ACCESS_READ constant is
-** currently unused, though it might be used in a future release of
-** SQLite.
-*/
-#define SQLITE_ACCESS_EXISTS 0
-#define SQLITE_ACCESS_READWRITE 1 /* Used by PRAGMA temp_store_directory */
-#define SQLITE_ACCESS_READ 2 /* Unused */
-
-/*
-** CAPI3REF: Flags for the xShmLock VFS method
-**
-** These integer constants define the various locking operations
-** allowed by the xShmLock method of [sqlite3_io_methods]. The
-** following are the only legal combinations of flags to the
-** xShmLock method:
-**
-**
-** SQLITE_SHM_LOCK | SQLITE_SHM_SHARED
-** SQLITE_SHM_LOCK | SQLITE_SHM_EXCLUSIVE
-** SQLITE_SHM_UNLOCK | SQLITE_SHM_SHARED
-** SQLITE_SHM_UNLOCK | SQLITE_SHM_EXCLUSIVE
-**
-**
-** When unlocking, the same SHARED or EXCLUSIVE flag must be supplied as
-** was given on the corresponding lock.
-**
-** The xShmLock method can transition between unlocked and SHARED or
-** between unlocked and EXCLUSIVE. It cannot transition between SHARED
-** and EXCLUSIVE.
-*/
-#define SQLITE_SHM_UNLOCK 1
-#define SQLITE_SHM_LOCK 2
-#define SQLITE_SHM_SHARED 4
-#define SQLITE_SHM_EXCLUSIVE 8
-
-/*
-** CAPI3REF: Maximum xShmLock index
-**
-** The xShmLock method on [sqlite3_io_methods] may use values
-** between 0 and this upper bound as its "offset" argument.
-** The SQLite core will never attempt to acquire or release a
-** lock outside of this range
-*/
-#define SQLITE_SHM_NLOCK 8
-
-
-/*
-** CAPI3REF: Initialize The SQLite Library
-**
-** ^The sqlite3_initialize() routine initializes the
-** SQLite library. ^The sqlite3_shutdown() routine
-** deallocates any resources that were allocated by sqlite3_initialize().
-** These routines are designed to aid in process initialization and
-** shutdown on embedded systems. Workstation applications using
-** SQLite normally do not need to invoke either of these routines.
-**
-** A call to sqlite3_initialize() is an "effective" call if it is
-** the first time sqlite3_initialize() is invoked during the lifetime of
-** the process, or if it is the first time sqlite3_initialize() is invoked
-** following a call to sqlite3_shutdown(). ^(Only an effective call
-** of sqlite3_initialize() does any initialization. All other calls
-** are harmless no-ops.)^
-**
-** A call to sqlite3_shutdown() is an "effective" call if it is the first
-** call to sqlite3_shutdown() since the last sqlite3_initialize(). ^(Only
-** an effective call to sqlite3_shutdown() does any deinitialization.
-** All other valid calls to sqlite3_shutdown() are harmless no-ops.)^
-**
-** The sqlite3_initialize() interface is threadsafe, but sqlite3_shutdown()
-** is not. The sqlite3_shutdown() interface must only be called from a
-** single thread. All open [database connections] must be closed and all
-** other SQLite resources must be deallocated prior to invoking
-** sqlite3_shutdown().
-**
-** Among other things, ^sqlite3_initialize() will invoke
-** sqlite3_os_init(). Similarly, ^sqlite3_shutdown()
-** will invoke sqlite3_os_end().
-**
-** ^The sqlite3_initialize() routine returns [SQLITE_OK] on success.
-** ^If for some reason, sqlite3_initialize() is unable to initialize
-** the library (perhaps it is unable to allocate a needed resource such
-** as a mutex) it returns an [error code] other than [SQLITE_OK].
-**
-** ^The sqlite3_initialize() routine is called internally by many other
-** SQLite interfaces so that an application usually does not need to
-** invoke sqlite3_initialize() directly. For example, [sqlite3_open()]
-** calls sqlite3_initialize() so the SQLite library will be automatically
-** initialized when [sqlite3_open()] is called if it has not be initialized
-** already. ^However, if SQLite is compiled with the [SQLITE_OMIT_AUTOINIT]
-** compile-time option, then the automatic calls to sqlite3_initialize()
-** are omitted and the application must call sqlite3_initialize() directly
-** prior to using any other SQLite interface. For maximum portability,
-** it is recommended that applications always invoke sqlite3_initialize()
-** directly prior to using any other SQLite interface. Future releases
-** of SQLite may require this. In other words, the behavior exhibited
-** when SQLite is compiled with [SQLITE_OMIT_AUTOINIT] might become the
-** default behavior in some future release of SQLite.
-**
-** The sqlite3_os_init() routine does operating-system specific
-** initialization of the SQLite library. The sqlite3_os_end()
-** routine undoes the effect of sqlite3_os_init(). Typical tasks
-** performed by these routines include allocation or deallocation
-** of static resources, initialization of global variables,
-** setting up a default [sqlite3_vfs] module, or setting up
-** a default configuration using [sqlite3_config()].
-**
-** The application should never invoke either sqlite3_os_init()
-** or sqlite3_os_end() directly. The application should only invoke
-** sqlite3_initialize() and sqlite3_shutdown(). The sqlite3_os_init()
-** interface is called automatically by sqlite3_initialize() and
-** sqlite3_os_end() is called by sqlite3_shutdown(). Appropriate
-** implementations for sqlite3_os_init() and sqlite3_os_end()
-** are built into SQLite when it is compiled for Unix, Windows, or OS/2.
-** When [custom builds | built for other platforms]
-** (using the [SQLITE_OS_OTHER=1] compile-time
-** option) the application must supply a suitable implementation for
-** sqlite3_os_init() and sqlite3_os_end(). An application-supplied
-** implementation of sqlite3_os_init() or sqlite3_os_end()
-** must return [SQLITE_OK] on success and some other [error code] upon
-** failure.
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_initialize(void);
-SQLITE_API int SQLITE_STDCALL sqlite3_shutdown(void);
-SQLITE_API int SQLITE_STDCALL sqlite3_os_init(void);
-SQLITE_API int SQLITE_STDCALL sqlite3_os_end(void);
-
-/*
-** CAPI3REF: Configuring The SQLite Library
-**
-** The sqlite3_config() interface is used to make global configuration
-** changes to SQLite in order to tune SQLite to the specific needs of
-** the application. The default configuration is recommended for most
-** applications and so this routine is usually not necessary. It is
-** provided to support rare applications with unusual needs.
-**
-** The sqlite3_config() interface is not threadsafe. The application
-** must ensure that no other SQLite interfaces are invoked by other
-** threads while sqlite3_config() is running.
-**
-** The sqlite3_config() interface
-** may only be invoked prior to library initialization using
-** [sqlite3_initialize()] or after shutdown by [sqlite3_shutdown()].
-** ^If sqlite3_config() is called after [sqlite3_initialize()] and before
-** [sqlite3_shutdown()] then it will return SQLITE_MISUSE.
-** Note, however, that ^sqlite3_config() can be called as part of the
-** implementation of an application-defined [sqlite3_os_init()].
-**
-** The first argument to sqlite3_config() is an integer
-** [configuration option] that determines
-** what property of SQLite is to be configured. Subsequent arguments
-** vary depending on the [configuration option]
-** in the first argument.
-**
-** ^When a configuration option is set, sqlite3_config() returns [SQLITE_OK].
-** ^If the option is unknown or SQLite is unable to set the option
-** then this routine returns a non-zero [error code].
-*/
-SQLITE_API int SQLITE_CDECL sqlite3_config(int, ...);
-
-/*
-** CAPI3REF: Configure database connections
-** METHOD: sqlite3
-**
-** The sqlite3_db_config() interface is used to make configuration
-** changes to a [database connection]. The interface is similar to
-** [sqlite3_config()] except that the changes apply to a single
-** [database connection] (specified in the first argument).
-**
-** The second argument to sqlite3_db_config(D,V,...) is the
-** [SQLITE_DBCONFIG_LOOKASIDE | configuration verb] - an integer code
-** that indicates what aspect of the [database connection] is being configured.
-** Subsequent arguments vary depending on the configuration verb.
-**
-** ^Calls to sqlite3_db_config() return SQLITE_OK if and only if
-** the call is considered successful.
-*/
-SQLITE_API int SQLITE_CDECL sqlite3_db_config(sqlite3*, int op, ...);
-
-/*
-** CAPI3REF: Memory Allocation Routines
-**
-** An instance of this object defines the interface between SQLite
-** and low-level memory allocation routines.
-**
-** This object is used in only one place in the SQLite interface.
-** A pointer to an instance of this object is the argument to
-** [sqlite3_config()] when the configuration option is
-** [SQLITE_CONFIG_MALLOC] or [SQLITE_CONFIG_GETMALLOC].
-** By creating an instance of this object
-** and passing it to [sqlite3_config]([SQLITE_CONFIG_MALLOC])
-** during configuration, an application can specify an alternative
-** memory allocation subsystem for SQLite to use for all of its
-** dynamic memory needs.
-**
-** Note that SQLite comes with several [built-in memory allocators]
-** that are perfectly adequate for the overwhelming majority of applications
-** and that this object is only useful to a tiny minority of applications
-** with specialized memory allocation requirements. This object is
-** also used during testing of SQLite in order to specify an alternative
-** memory allocator that simulates memory out-of-memory conditions in
-** order to verify that SQLite recovers gracefully from such
-** conditions.
-**
-** The xMalloc, xRealloc, and xFree methods must work like the
-** malloc(), realloc() and free() functions from the standard C library.
-** ^SQLite guarantees that the second argument to
-** xRealloc is always a value returned by a prior call to xRoundup.
-**
-** xSize should return the allocated size of a memory allocation
-** previously obtained from xMalloc or xRealloc. The allocated size
-** is always at least as big as the requested size but may be larger.
-**
-** The xRoundup method returns what would be the allocated size of
-** a memory allocation given a particular requested size. Most memory
-** allocators round up memory allocations at least to the next multiple
-** of 8. Some allocators round up to a larger multiple or to a power of 2.
-** Every memory allocation request coming in through [sqlite3_malloc()]
-** or [sqlite3_realloc()] first calls xRoundup. If xRoundup returns 0,
-** that causes the corresponding memory allocation to fail.
-**
-** The xInit method initializes the memory allocator. For example,
-** it might allocate any require mutexes or initialize internal data
-** structures. The xShutdown method is invoked (indirectly) by
-** [sqlite3_shutdown()] and should deallocate any resources acquired
-** by xInit. The pAppData pointer is used as the only parameter to
-** xInit and xShutdown.
-**
-** SQLite holds the [SQLITE_MUTEX_STATIC_MASTER] mutex when it invokes
-** the xInit method, so the xInit method need not be threadsafe. The
-** xShutdown method is only called from [sqlite3_shutdown()] so it does
-** not need to be threadsafe either. For all other methods, SQLite
-** holds the [SQLITE_MUTEX_STATIC_MEM] mutex as long as the
-** [SQLITE_CONFIG_MEMSTATUS] configuration option is turned on (which
-** it is by default) and so the methods are automatically serialized.
-** However, if [SQLITE_CONFIG_MEMSTATUS] is disabled, then the other
-** methods must be threadsafe or else make their own arrangements for
-** serialization.
-**
-** SQLite will never invoke xInit() more than once without an intervening
-** call to xShutdown().
-*/
-typedef struct sqlite3_mem_methods sqlite3_mem_methods;
-struct sqlite3_mem_methods {
- void *(*xMalloc)(int); /* Memory allocation function */
- void (*xFree)(void*); /* Free a prior allocation */
- void *(*xRealloc)(void*,int); /* Resize an allocation */
- int (*xSize)(void*); /* Return the size of an allocation */
- int (*xRoundup)(int); /* Round up request size to allocation size */
- int (*xInit)(void*); /* Initialize the memory allocator */
- void (*xShutdown)(void*); /* Deinitialize the memory allocator */
- void *pAppData; /* Argument to xInit() and xShutdown() */
-};
-
-/*
-** CAPI3REF: Configuration Options
-** KEYWORDS: {configuration option}
-**
-** These constants are the available integer configuration options that
-** can be passed as the first argument to the [sqlite3_config()] interface.
-**
-** New configuration options may be added in future releases of SQLite.
-** Existing configuration options might be discontinued. Applications
-** should check the return code from [sqlite3_config()] to make sure that
-** the call worked. The [sqlite3_config()] interface will return a
-** non-zero [error code] if a discontinued or unsupported configuration option
-** is invoked.
-**
-**
-** [[SQLITE_CONFIG_SINGLETHREAD]] SQLITE_CONFIG_SINGLETHREAD
-** There are no arguments to this option. ^This option sets the
-** [threading mode] to Single-thread. In other words, it disables
-** all mutexing and puts SQLite into a mode where it can only be used
-** by a single thread. ^If SQLite is compiled with
-** the [SQLITE_THREADSAFE | SQLITE_THREADSAFE=0] compile-time option then
-** it is not possible to change the [threading mode] from its default
-** value of Single-thread and so [sqlite3_config()] will return
-** [SQLITE_ERROR] if called with the SQLITE_CONFIG_SINGLETHREAD
-** configuration option.
-**
-** [[SQLITE_CONFIG_MULTITHREAD]] SQLITE_CONFIG_MULTITHREAD
-** There are no arguments to this option. ^This option sets the
-** [threading mode] to Multi-thread. In other words, it disables
-** mutexing on [database connection] and [prepared statement] objects.
-** The application is responsible for serializing access to
-** [database connections] and [prepared statements]. But other mutexes
-** are enabled so that SQLite will be safe to use in a multi-threaded
-** environment as long as no two threads attempt to use the same
-** [database connection] at the same time. ^If SQLite is compiled with
-** the [SQLITE_THREADSAFE | SQLITE_THREADSAFE=0] compile-time option then
-** it is not possible to set the Multi-thread [threading mode] and
-** [sqlite3_config()] will return [SQLITE_ERROR] if called with the
-** SQLITE_CONFIG_MULTITHREAD configuration option.
-**
-** [[SQLITE_CONFIG_SERIALIZED]] SQLITE_CONFIG_SERIALIZED
-** There are no arguments to this option. ^This option sets the
-** [threading mode] to Serialized. In other words, this option enables
-** all mutexes including the recursive
-** mutexes on [database connection] and [prepared statement] objects.
-** In this mode (which is the default when SQLite is compiled with
-** [SQLITE_THREADSAFE=1]) the SQLite library will itself serialize access
-** to [database connections] and [prepared statements] so that the
-** application is free to use the same [database connection] or the
-** same [prepared statement] in different threads at the same time.
-** ^If SQLite is compiled with
-** the [SQLITE_THREADSAFE | SQLITE_THREADSAFE=0] compile-time option then
-** it is not possible to set the Serialized [threading mode] and
-** [sqlite3_config()] will return [SQLITE_ERROR] if called with the
-** SQLITE_CONFIG_SERIALIZED configuration option.
-**
-** [[SQLITE_CONFIG_MALLOC]] SQLITE_CONFIG_MALLOC
-** ^(The SQLITE_CONFIG_MALLOC option takes a single argument which is
-** a pointer to an instance of the [sqlite3_mem_methods] structure.
-** The argument specifies
-** alternative low-level memory allocation routines to be used in place of
-** the memory allocation routines built into SQLite.)^ ^SQLite makes
-** its own private copy of the content of the [sqlite3_mem_methods] structure
-** before the [sqlite3_config()] call returns.
-**
-** [[SQLITE_CONFIG_GETMALLOC]] SQLITE_CONFIG_GETMALLOC
-** ^(The SQLITE_CONFIG_GETMALLOC option takes a single argument which
-** is a pointer to an instance of the [sqlite3_mem_methods] structure.
-** The [sqlite3_mem_methods]
-** structure is filled with the currently defined memory allocation routines.)^
-** This option can be used to overload the default memory allocation
-** routines with a wrapper that simulations memory allocation failure or
-** tracks memory usage, for example.
-**
-** [[SQLITE_CONFIG_MEMSTATUS]] SQLITE_CONFIG_MEMSTATUS
-** ^The SQLITE_CONFIG_MEMSTATUS option takes single argument of type int,
-** interpreted as a boolean, which enables or disables the collection of
-** memory allocation statistics. ^(When memory allocation statistics are
-** disabled, the following SQLite interfaces become non-operational:
-**
-** [sqlite3_memory_used()]
-** [sqlite3_memory_highwater()]
-** [sqlite3_soft_heap_limit64()]
-** [sqlite3_status64()]
-** )^
-** ^Memory allocation statistics are enabled by default unless SQLite is
-** compiled with [SQLITE_DEFAULT_MEMSTATUS]=0 in which case memory
-** allocation statistics are disabled by default.
-**
-**
-** [[SQLITE_CONFIG_SCRATCH]] SQLITE_CONFIG_SCRATCH
-** ^The SQLITE_CONFIG_SCRATCH option specifies a static memory buffer
-** that SQLite can use for scratch memory. ^(There are three arguments
-** to SQLITE_CONFIG_SCRATCH: A pointer an 8-byte
-** aligned memory buffer from which the scratch allocations will be
-** drawn, the size of each scratch allocation (sz),
-** and the maximum number of scratch allocations (N).)^
-** The first argument must be a pointer to an 8-byte aligned buffer
-** of at least sz*N bytes of memory.
-** ^SQLite will not use more than one scratch buffers per thread.
-** ^SQLite will never request a scratch buffer that is more than 6
-** times the database page size.
-** ^If SQLite needs needs additional
-** scratch memory beyond what is provided by this configuration option, then
-** [sqlite3_malloc()] will be used to obtain the memory needed.
-** ^When the application provides any amount of scratch memory using
-** SQLITE_CONFIG_SCRATCH, SQLite avoids unnecessary large
-** [sqlite3_malloc|heap allocations].
-** This can help [Robson proof|prevent memory allocation failures] due to heap
-** fragmentation in low-memory embedded systems.
-**
-**
-** [[SQLITE_CONFIG_PAGECACHE]] SQLITE_CONFIG_PAGECACHE
-** ^The SQLITE_CONFIG_PAGECACHE option specifies a memory pool
-** that SQLite can use for the database page cache with the default page
-** cache implementation.
-** This configuration option is a no-op if an application-define page
-** cache implementation is loaded using the [SQLITE_CONFIG_PCACHE2].
-** ^There are three arguments to SQLITE_CONFIG_PAGECACHE: A pointer to
-** 8-byte aligned memory (pMem), the size of each page cache line (sz),
-** and the number of cache lines (N).
-** The sz argument should be the size of the largest database page
-** (a power of two between 512 and 65536) plus some extra bytes for each
-** page header. ^The number of extra bytes needed by the page header
-** can be determined using [SQLITE_CONFIG_PCACHE_HDRSZ].
-** ^It is harmless, apart from the wasted memory,
-** for the sz parameter to be larger than necessary. The pMem
-** argument must be either a NULL pointer or a pointer to an 8-byte
-** aligned block of memory of at least sz*N bytes, otherwise
-** subsequent behavior is undefined.
-** ^When pMem is not NULL, SQLite will strive to use the memory provided
-** to satisfy page cache needs, falling back to [sqlite3_malloc()] if
-** a page cache line is larger than sz bytes or if all of the pMem buffer
-** is exhausted.
-** ^If pMem is NULL and N is non-zero, then each database connection
-** does an initial bulk allocation for page cache memory
-** from [sqlite3_malloc()] sufficient for N cache lines if N is positive or
-** of -1024*N bytes if N is negative. ^If additional
-** page cache memory is needed beyond what is provided by the initial
-** allocation, then SQLite goes to [sqlite3_malloc()] separately for each
-** additional cache line.
-**
-** [[SQLITE_CONFIG_HEAP]] SQLITE_CONFIG_HEAP
-** ^The SQLITE_CONFIG_HEAP option specifies a static memory buffer
-** that SQLite will use for all of its dynamic memory allocation needs
-** beyond those provided for by [SQLITE_CONFIG_SCRATCH] and
-** [SQLITE_CONFIG_PAGECACHE].
-** ^The SQLITE_CONFIG_HEAP option is only available if SQLite is compiled
-** with either [SQLITE_ENABLE_MEMSYS3] or [SQLITE_ENABLE_MEMSYS5] and returns
-** [SQLITE_ERROR] if invoked otherwise.
-** ^There are three arguments to SQLITE_CONFIG_HEAP:
-** An 8-byte aligned pointer to the memory,
-** the number of bytes in the memory buffer, and the minimum allocation size.
-** ^If the first pointer (the memory pointer) is NULL, then SQLite reverts
-** to using its default memory allocator (the system malloc() implementation),
-** undoing any prior invocation of [SQLITE_CONFIG_MALLOC]. ^If the
-** memory pointer is not NULL then the alternative memory
-** allocator is engaged to handle all of SQLite's memory allocation needs.
-** The first pointer (the memory pointer) must be aligned to an 8-byte
-** boundary or subsequent behavior of SQLite will be undefined.
-** The minimum allocation size is capped at 2**12. Reasonable values
-** for the minimum allocation size are 2**5 through 2**8.
-**
-** [[SQLITE_CONFIG_MUTEX]] SQLITE_CONFIG_MUTEX
-** ^(The SQLITE_CONFIG_MUTEX option takes a single argument which is a
-** pointer to an instance of the [sqlite3_mutex_methods] structure.
-** The argument specifies alternative low-level mutex routines to be used
-** in place of the mutex routines built into SQLite.)^ ^SQLite makes a copy of
-** the content of the [sqlite3_mutex_methods] structure before the call to
-** [sqlite3_config()] returns. ^If SQLite is compiled with
-** the [SQLITE_THREADSAFE | SQLITE_THREADSAFE=0] compile-time option then
-** the entire mutexing subsystem is omitted from the build and hence calls to
-** [sqlite3_config()] with the SQLITE_CONFIG_MUTEX configuration option will
-** return [SQLITE_ERROR].
-**
-** [[SQLITE_CONFIG_GETMUTEX]] SQLITE_CONFIG_GETMUTEX
-** ^(The SQLITE_CONFIG_GETMUTEX option takes a single argument which
-** is a pointer to an instance of the [sqlite3_mutex_methods] structure. The
-** [sqlite3_mutex_methods]
-** structure is filled with the currently defined mutex routines.)^
-** This option can be used to overload the default mutex allocation
-** routines with a wrapper used to track mutex usage for performance
-** profiling or testing, for example. ^If SQLite is compiled with
-** the [SQLITE_THREADSAFE | SQLITE_THREADSAFE=0] compile-time option then
-** the entire mutexing subsystem is omitted from the build and hence calls to
-** [sqlite3_config()] with the SQLITE_CONFIG_GETMUTEX configuration option will
-** return [SQLITE_ERROR].
-**
-** [[SQLITE_CONFIG_LOOKASIDE]] SQLITE_CONFIG_LOOKASIDE
-** ^(The SQLITE_CONFIG_LOOKASIDE option takes two arguments that determine
-** the default size of lookaside memory on each [database connection].
-** The first argument is the
-** size of each lookaside buffer slot and the second is the number of
-** slots allocated to each database connection.)^ ^(SQLITE_CONFIG_LOOKASIDE
-** sets the default lookaside size. The [SQLITE_DBCONFIG_LOOKASIDE]
-** option to [sqlite3_db_config()] can be used to change the lookaside
-** configuration on individual connections.)^
-**
-** [[SQLITE_CONFIG_PCACHE2]] SQLITE_CONFIG_PCACHE2
-** ^(The SQLITE_CONFIG_PCACHE2 option takes a single argument which is
-** a pointer to an [sqlite3_pcache_methods2] object. This object specifies
-** the interface to a custom page cache implementation.)^
-** ^SQLite makes a copy of the [sqlite3_pcache_methods2] object.
-**
-** [[SQLITE_CONFIG_GETPCACHE2]] SQLITE_CONFIG_GETPCACHE2
-** ^(The SQLITE_CONFIG_GETPCACHE2 option takes a single argument which
-** is a pointer to an [sqlite3_pcache_methods2] object. SQLite copies
-** the current page cache implementation into that object.)^
-**
-** [[SQLITE_CONFIG_LOG]] SQLITE_CONFIG_LOG
-** The SQLITE_CONFIG_LOG option is used to configure the SQLite
-** global [error log].
-** (^The SQLITE_CONFIG_LOG option takes two arguments: a pointer to a
-** function with a call signature of void(*)(void*,int,const char*),
-** and a pointer to void. ^If the function pointer is not NULL, it is
-** invoked by [sqlite3_log()] to process each logging event. ^If the
-** function pointer is NULL, the [sqlite3_log()] interface becomes a no-op.
-** ^The void pointer that is the second argument to SQLITE_CONFIG_LOG is
-** passed through as the first parameter to the application-defined logger
-** function whenever that function is invoked. ^The second parameter to
-** the logger function is a copy of the first parameter to the corresponding
-** [sqlite3_log()] call and is intended to be a [result code] or an
-** [extended result code]. ^The third parameter passed to the logger is
-** the log message after formatting via [sqlite3_snprintf()].
-** The SQLite logging interface is not reentrant; the logger function
-** supplied by the application must not invoke any SQLite interface.
-** In a multi-threaded application, the application-defined logger
-** function must be threadsafe.
-**
-** [[SQLITE_CONFIG_URI]] SQLITE_CONFIG_URI
-** ^(The SQLITE_CONFIG_URI option takes a single argument of type int.
-** If non-zero, then URI handling is globally enabled. If the parameter is zero,
-** then URI handling is globally disabled.)^ ^If URI handling is globally
-** enabled, all filenames passed to [sqlite3_open()], [sqlite3_open_v2()],
-** [sqlite3_open16()] or
-** specified as part of [ATTACH] commands are interpreted as URIs, regardless
-** of whether or not the [SQLITE_OPEN_URI] flag is set when the database
-** connection is opened. ^If it is globally disabled, filenames are
-** only interpreted as URIs if the SQLITE_OPEN_URI flag is set when the
-** database connection is opened. ^(By default, URI handling is globally
-** disabled. The default value may be changed by compiling with the
-** [SQLITE_USE_URI] symbol defined.)^
-**
-** [[SQLITE_CONFIG_COVERING_INDEX_SCAN]] SQLITE_CONFIG_COVERING_INDEX_SCAN
-** ^The SQLITE_CONFIG_COVERING_INDEX_SCAN option takes a single integer
-** argument which is interpreted as a boolean in order to enable or disable
-** the use of covering indices for full table scans in the query optimizer.
-** ^The default setting is determined
-** by the [SQLITE_ALLOW_COVERING_INDEX_SCAN] compile-time option, or is "on"
-** if that compile-time option is omitted.
-** The ability to disable the use of covering indices for full table scans
-** is because some incorrectly coded legacy applications might malfunction
-** when the optimization is enabled. Providing the ability to
-** disable the optimization allows the older, buggy application code to work
-** without change even with newer versions of SQLite.
-**
-** [[SQLITE_CONFIG_PCACHE]] [[SQLITE_CONFIG_GETPCACHE]]
-** SQLITE_CONFIG_PCACHE and SQLITE_CONFIG_GETPCACHE
-** These options are obsolete and should not be used by new code.
-** They are retained for backwards compatibility but are now no-ops.
-**
-**
-** [[SQLITE_CONFIG_SQLLOG]]
-** SQLITE_CONFIG_SQLLOG
-** This option is only available if sqlite is compiled with the
-** [SQLITE_ENABLE_SQLLOG] pre-processor macro defined. The first argument should
-** be a pointer to a function of type void(*)(void*,sqlite3*,const char*, int).
-** The second should be of type (void*). The callback is invoked by the library
-** in three separate circumstances, identified by the value passed as the
-** fourth parameter. If the fourth parameter is 0, then the database connection
-** passed as the second argument has just been opened. The third argument
-** points to a buffer containing the name of the main database file. If the
-** fourth parameter is 1, then the SQL statement that the third parameter
-** points to has just been executed. Or, if the fourth parameter is 2, then
-** the connection being passed as the second parameter is being closed. The
-** third parameter is passed NULL in this case. An example of using this
-** configuration option can be seen in the "test_sqllog.c" source file in
-** the canonical SQLite source tree.
-**
-** [[SQLITE_CONFIG_MMAP_SIZE]]
-** SQLITE_CONFIG_MMAP_SIZE
-** ^SQLITE_CONFIG_MMAP_SIZE takes two 64-bit integer (sqlite3_int64) values
-** that are the default mmap size limit (the default setting for
-** [PRAGMA mmap_size]) and the maximum allowed mmap size limit.
-** ^The default setting can be overridden by each database connection using
-** either the [PRAGMA mmap_size] command, or by using the
-** [SQLITE_FCNTL_MMAP_SIZE] file control. ^(The maximum allowed mmap size
-** will be silently truncated if necessary so that it does not exceed the
-** compile-time maximum mmap size set by the
-** [SQLITE_MAX_MMAP_SIZE] compile-time option.)^
-** ^If either argument to this option is negative, then that argument is
-** changed to its compile-time default.
-**
-** [[SQLITE_CONFIG_WIN32_HEAPSIZE]]
-** SQLITE_CONFIG_WIN32_HEAPSIZE
-** ^The SQLITE_CONFIG_WIN32_HEAPSIZE option is only available if SQLite is
-** compiled for Windows with the [SQLITE_WIN32_MALLOC] pre-processor macro
-** defined. ^SQLITE_CONFIG_WIN32_HEAPSIZE takes a 32-bit unsigned integer value
-** that specifies the maximum size of the created heap.
-**
-** [[SQLITE_CONFIG_PCACHE_HDRSZ]]
-** SQLITE_CONFIG_PCACHE_HDRSZ
-** ^The SQLITE_CONFIG_PCACHE_HDRSZ option takes a single parameter which
-** is a pointer to an integer and writes into that integer the number of extra
-** bytes per page required for each page in [SQLITE_CONFIG_PAGECACHE].
-** The amount of extra space required can change depending on the compiler,
-** target platform, and SQLite version.
-**
-** [[SQLITE_CONFIG_PMASZ]]
-** SQLITE_CONFIG_PMASZ
-** ^The SQLITE_CONFIG_PMASZ option takes a single parameter which
-** is an unsigned integer and sets the "Minimum PMA Size" for the multithreaded
-** sorter to that integer. The default minimum PMA Size is set by the
-** [SQLITE_SORTER_PMASZ] compile-time option. New threads are launched
-** to help with sort operations when multithreaded sorting
-** is enabled (using the [PRAGMA threads] command) and the amount of content
-** to be sorted exceeds the page size times the minimum of the
-** [PRAGMA cache_size] setting and this value.
-**
-** [[SQLITE_CONFIG_STMTJRNL_SPILL]]
-** SQLITE_CONFIG_STMTJRNL_SPILL
-** ^The SQLITE_CONFIG_STMTJRNL_SPILL option takes a single parameter which
-** becomes the [statement journal] spill-to-disk threshold.
-** [Statement journals] are held in memory until their size (in bytes)
-** exceeds this threshold, at which point they are written to disk.
-** Or if the threshold is -1, statement journals are always held
-** exclusively in memory.
-** Since many statement journals never become large, setting the spill
-** threshold to a value such as 64KiB can greatly reduce the amount of
-** I/O required to support statement rollback.
-** The default value for this setting is controlled by the
-** [SQLITE_STMTJRNL_SPILL] compile-time option.
-**
-*/
-#define SQLITE_CONFIG_SINGLETHREAD 1 /* nil */
-#define SQLITE_CONFIG_MULTITHREAD 2 /* nil */
-#define SQLITE_CONFIG_SERIALIZED 3 /* nil */
-#define SQLITE_CONFIG_MALLOC 4 /* sqlite3_mem_methods* */
-#define SQLITE_CONFIG_GETMALLOC 5 /* sqlite3_mem_methods* */
-#define SQLITE_CONFIG_SCRATCH 6 /* void*, int sz, int N */
-#define SQLITE_CONFIG_PAGECACHE 7 /* void*, int sz, int N */
-#define SQLITE_CONFIG_HEAP 8 /* void*, int nByte, int min */
-#define SQLITE_CONFIG_MEMSTATUS 9 /* boolean */
-#define SQLITE_CONFIG_MUTEX 10 /* sqlite3_mutex_methods* */
-#define SQLITE_CONFIG_GETMUTEX 11 /* sqlite3_mutex_methods* */
-/* previously SQLITE_CONFIG_CHUNKALLOC 12 which is now unused. */
-#define SQLITE_CONFIG_LOOKASIDE 13 /* int int */
-#define SQLITE_CONFIG_PCACHE 14 /* no-op */
-#define SQLITE_CONFIG_GETPCACHE 15 /* no-op */
-#define SQLITE_CONFIG_LOG 16 /* xFunc, void* */
-#define SQLITE_CONFIG_URI 17 /* int */
-#define SQLITE_CONFIG_PCACHE2 18 /* sqlite3_pcache_methods2* */
-#define SQLITE_CONFIG_GETPCACHE2 19 /* sqlite3_pcache_methods2* */
-#define SQLITE_CONFIG_COVERING_INDEX_SCAN 20 /* int */
-#define SQLITE_CONFIG_SQLLOG 21 /* xSqllog, void* */
-#define SQLITE_CONFIG_MMAP_SIZE 22 /* sqlite3_int64, sqlite3_int64 */
-#define SQLITE_CONFIG_WIN32_HEAPSIZE 23 /* int nByte */
-#define SQLITE_CONFIG_PCACHE_HDRSZ 24 /* int *psz */
-#define SQLITE_CONFIG_PMASZ 25 /* unsigned int szPma */
-#define SQLITE_CONFIG_STMTJRNL_SPILL 26 /* int nByte */
-
-/*
-** CAPI3REF: Database Connection Configuration Options
-**
-** These constants are the available integer configuration options that
-** can be passed as the second argument to the [sqlite3_db_config()] interface.
-**
-** New configuration options may be added in future releases of SQLite.
-** Existing configuration options might be discontinued. Applications
-** should check the return code from [sqlite3_db_config()] to make sure that
-** the call worked. ^The [sqlite3_db_config()] interface will return a
-** non-zero [error code] if a discontinued or unsupported configuration option
-** is invoked.
-**
-**
-** SQLITE_DBCONFIG_LOOKASIDE
-** ^This option takes three additional arguments that determine the
-** [lookaside memory allocator] configuration for the [database connection].
-** ^The first argument (the third parameter to [sqlite3_db_config()]) is a
-** pointer to a memory buffer to use for lookaside memory.
-** ^The first argument after the SQLITE_DBCONFIG_LOOKASIDE verb
-** may be NULL in which case SQLite will allocate the
-** lookaside buffer itself using [sqlite3_malloc()]. ^The second argument is the
-** size of each lookaside buffer slot. ^The third argument is the number of
-** slots. The size of the buffer in the first argument must be greater than
-** or equal to the product of the second and third arguments. The buffer
-** must be aligned to an 8-byte boundary. ^If the second argument to
-** SQLITE_DBCONFIG_LOOKASIDE is not a multiple of 8, it is internally
-** rounded down to the next smaller multiple of 8. ^(The lookaside memory
-** configuration for a database connection can only be changed when that
-** connection is not currently using lookaside memory, or in other words
-** when the "current value" returned by
-** [sqlite3_db_status](D,[SQLITE_DBSTATUS_LOOKASIDE_USED],...) is zero.
-** Any attempt to change the lookaside memory configuration when lookaside
-** memory is in use leaves the configuration unchanged and returns
-** [SQLITE_BUSY].)^
-**
-** SQLITE_DBCONFIG_ENABLE_FKEY
-** ^This option is used to enable or disable the enforcement of
-** [foreign key constraints]. There should be two additional arguments.
-** The first argument is an integer which is 0 to disable FK enforcement,
-** positive to enable FK enforcement or negative to leave FK enforcement
-** unchanged. The second parameter is a pointer to an integer into which
-** is written 0 or 1 to indicate whether FK enforcement is off or on
-** following this call. The second parameter may be a NULL pointer, in
-** which case the FK enforcement setting is not reported back.
-**
-** SQLITE_DBCONFIG_ENABLE_TRIGGER
-** ^This option is used to enable or disable [CREATE TRIGGER | triggers].
-** There should be two additional arguments.
-** The first argument is an integer which is 0 to disable triggers,
-** positive to enable triggers or negative to leave the setting unchanged.
-** The second parameter is a pointer to an integer into which
-** is written 0 or 1 to indicate whether triggers are disabled or enabled
-** following this call. The second parameter may be a NULL pointer, in
-** which case the trigger setting is not reported back.
-**
-** SQLITE_DBCONFIG_ENABLE_FTS3_TOKENIZER
-** ^This option is used to enable or disable the two-argument
-** version of the [fts3_tokenizer()] function which is part of the
-** [FTS3] full-text search engine extension.
-** There should be two additional arguments.
-** The first argument is an integer which is 0 to disable fts3_tokenizer() or
-** positive to enable fts3_tokenizer() or negative to leave the setting
-** unchanged.
-** The second parameter is a pointer to an integer into which
-** is written 0 or 1 to indicate whether fts3_tokenizer is disabled or enabled
-** following this call. The second parameter may be a NULL pointer, in
-** which case the new setting is not reported back.
-**
-** SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION
-** ^This option is used to enable or disable the [sqlite3_load_extension()]
-** interface independently of the [load_extension()] SQL function.
-** The [sqlite3_enable_load_extension()] API enables or disables both the
-** C-API [sqlite3_load_extension()] and the SQL function [load_extension()].
-** There should be two additional arguments.
-** When the first argument to this interface is 1, then only the C-API is
-** enabled and the SQL function remains disabled. If the first argument to
-** this interface is 0, then both the C-API and the SQL function are disabled.
-** If the first argument is -1, then no changes are made to state of either the
-** C-API or the SQL function.
-** The second parameter is a pointer to an integer into which
-** is written 0 or 1 to indicate whether [sqlite3_load_extension()] interface
-** is disabled or enabled following this call. The second parameter may
-** be a NULL pointer, in which case the new setting is not reported back.
-**
-**
-**
-*/
-#define SQLITE_DBCONFIG_LOOKASIDE 1001 /* void* int int */
-#define SQLITE_DBCONFIG_ENABLE_FKEY 1002 /* int int* */
-#define SQLITE_DBCONFIG_ENABLE_TRIGGER 1003 /* int int* */
-#define SQLITE_DBCONFIG_ENABLE_FTS3_TOKENIZER 1004 /* int int* */
-#define SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION 1005 /* int int* */
-
-
-/*
-** CAPI3REF: Enable Or Disable Extended Result Codes
-** METHOD: sqlite3
-**
-** ^The sqlite3_extended_result_codes() routine enables or disables the
-** [extended result codes] feature of SQLite. ^The extended result
-** codes are disabled by default for historical compatibility.
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_extended_result_codes(sqlite3*, int onoff);
-
-/*
-** CAPI3REF: Last Insert Rowid
-** METHOD: sqlite3
-**
-** ^Each entry in most SQLite tables (except for [WITHOUT ROWID] tables)
-** has a unique 64-bit signed
-** integer key called the [ROWID | "rowid"]. ^The rowid is always available
-** as an undeclared column named ROWID, OID, or _ROWID_ as long as those
-** names are not also used by explicitly declared columns. ^If
-** the table has a column of type [INTEGER PRIMARY KEY] then that column
-** is another alias for the rowid.
-**
-** ^The sqlite3_last_insert_rowid(D) interface returns the [rowid] of the
-** most recent successful [INSERT] into a rowid table or [virtual table]
-** on database connection D.
-** ^Inserts into [WITHOUT ROWID] tables are not recorded.
-** ^If no successful [INSERT]s into rowid tables
-** have ever occurred on the database connection D,
-** then sqlite3_last_insert_rowid(D) returns zero.
-**
-** ^(If an [INSERT] occurs within a trigger or within a [virtual table]
-** method, then this routine will return the [rowid] of the inserted
-** row as long as the trigger or virtual table method is running.
-** But once the trigger or virtual table method ends, the value returned
-** by this routine reverts to what it was before the trigger or virtual
-** table method began.)^
-**
-** ^An [INSERT] that fails due to a constraint violation is not a
-** successful [INSERT] and does not change the value returned by this
-** routine. ^Thus INSERT OR FAIL, INSERT OR IGNORE, INSERT OR ROLLBACK,
-** and INSERT OR ABORT make no changes to the return value of this
-** routine when their insertion fails. ^(When INSERT OR REPLACE
-** encounters a constraint violation, it does not fail. The
-** INSERT continues to completion after deleting rows that caused
-** the constraint problem so INSERT OR REPLACE will always change
-** the return value of this interface.)^
-**
-** ^For the purposes of this routine, an [INSERT] is considered to
-** be successful even if it is subsequently rolled back.
-**
-** This function is accessible to SQL statements via the
-** [last_insert_rowid() SQL function].
-**
-** If a separate thread performs a new [INSERT] on the same
-** database connection while the [sqlite3_last_insert_rowid()]
-** function is running and thus changes the last insert [rowid],
-** then the value returned by [sqlite3_last_insert_rowid()] is
-** unpredictable and might not equal either the old or the new
-** last insert [rowid].
-*/
-SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_last_insert_rowid(sqlite3*);
-
-/*
-** CAPI3REF: Count The Number Of Rows Modified
-** METHOD: sqlite3
-**
-** ^This function returns the number of rows modified, inserted or
-** deleted by the most recently completed INSERT, UPDATE or DELETE
-** statement on the database connection specified by the only parameter.
-** ^Executing any other type of SQL statement does not modify the value
-** returned by this function.
-**
-** ^Only changes made directly by the INSERT, UPDATE or DELETE statement are
-** considered - auxiliary changes caused by [CREATE TRIGGER | triggers],
-** [foreign key actions] or [REPLACE] constraint resolution are not counted.
-**
-** Changes to a view that are intercepted by
-** [INSTEAD OF trigger | INSTEAD OF triggers] are not counted. ^The value
-** returned by sqlite3_changes() immediately after an INSERT, UPDATE or
-** DELETE statement run on a view is always zero. Only changes made to real
-** tables are counted.
-**
-** Things are more complicated if the sqlite3_changes() function is
-** executed while a trigger program is running. This may happen if the
-** program uses the [changes() SQL function], or if some other callback
-** function invokes sqlite3_changes() directly. Essentially:
-**
-**
-** ^(Before entering a trigger program the value returned by
-** sqlite3_changes() function is saved. After the trigger program
-** has finished, the original value is restored.)^
-**
-** ^(Within a trigger program each INSERT, UPDATE and DELETE
-** statement sets the value returned by sqlite3_changes()
-** upon completion as normal. Of course, this value will not include
-** any changes performed by sub-triggers, as the sqlite3_changes()
-** value will be saved and restored after each sub-trigger has run.)^
-**
-**
-** ^This means that if the changes() SQL function (or similar) is used
-** by the first INSERT, UPDATE or DELETE statement within a trigger, it
-** returns the value as set when the calling statement began executing.
-** ^If it is used by the second or subsequent such statement within a trigger
-** program, the value returned reflects the number of rows modified by the
-** previous INSERT, UPDATE or DELETE statement within the same trigger.
-**
-** See also the [sqlite3_total_changes()] interface, the
-** [count_changes pragma], and the [changes() SQL function].
-**
-** If a separate thread makes changes on the same database connection
-** while [sqlite3_changes()] is running then the value returned
-** is unpredictable and not meaningful.
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_changes(sqlite3*);
-
-/*
-** CAPI3REF: Total Number Of Rows Modified
-** METHOD: sqlite3
-**
-** ^This function returns the total number of rows inserted, modified or
-** deleted by all [INSERT], [UPDATE] or [DELETE] statements completed
-** since the database connection was opened, including those executed as
-** part of trigger programs. ^Executing any other type of SQL statement
-** does not affect the value returned by sqlite3_total_changes().
-**
-** ^Changes made as part of [foreign key actions] are included in the
-** count, but those made as part of REPLACE constraint resolution are
-** not. ^Changes to a view that are intercepted by INSTEAD OF triggers
-** are not counted.
-**
-** See also the [sqlite3_changes()] interface, the
-** [count_changes pragma], and the [total_changes() SQL function].
-**
-** If a separate thread makes changes on the same database connection
-** while [sqlite3_total_changes()] is running then the value
-** returned is unpredictable and not meaningful.
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_total_changes(sqlite3*);
-
-/*
-** CAPI3REF: Interrupt A Long-Running Query
-** METHOD: sqlite3
-**
-** ^This function causes any pending database operation to abort and
-** return at its earliest opportunity. This routine is typically
-** called in response to a user action such as pressing "Cancel"
-** or Ctrl-C where the user wants a long query operation to halt
-** immediately.
-**
-** ^It is safe to call this routine from a thread different from the
-** thread that is currently running the database operation. But it
-** is not safe to call this routine with a [database connection] that
-** is closed or might close before sqlite3_interrupt() returns.
-**
-** ^If an SQL operation is very nearly finished at the time when
-** sqlite3_interrupt() is called, then it might not have an opportunity
-** to be interrupted and might continue to completion.
-**
-** ^An SQL operation that is interrupted will return [SQLITE_INTERRUPT].
-** ^If the interrupted SQL operation is an INSERT, UPDATE, or DELETE
-** that is inside an explicit transaction, then the entire transaction
-** will be rolled back automatically.
-**
-** ^The sqlite3_interrupt(D) call is in effect until all currently running
-** SQL statements on [database connection] D complete. ^Any new SQL statements
-** that are started after the sqlite3_interrupt() call and before the
-** running statement count reaches zero are interrupted as if they had been
-** running prior to the sqlite3_interrupt() call. ^New SQL statements
-** that are started after the running statement count reaches zero are
-** not affected by the sqlite3_interrupt().
-** ^A call to sqlite3_interrupt(D) that occurs when there are no running
-** SQL statements is a no-op and has no effect on SQL statements
-** that are started after the sqlite3_interrupt() call returns.
-**
-** If the database connection closes while [sqlite3_interrupt()]
-** is running then bad things will likely happen.
-*/
-SQLITE_API void SQLITE_STDCALL sqlite3_interrupt(sqlite3*);
-
-/*
-** CAPI3REF: Determine If An SQL Statement Is Complete
-**
-** These routines are useful during command-line input to determine if the
-** currently entered text seems to form a complete SQL statement or
-** if additional input is needed before sending the text into
-** SQLite for parsing. ^These routines return 1 if the input string
-** appears to be a complete SQL statement. ^A statement is judged to be
-** complete if it ends with a semicolon token and is not a prefix of a
-** well-formed CREATE TRIGGER statement. ^Semicolons that are embedded within
-** string literals or quoted identifier names or comments are not
-** independent tokens (they are part of the token in which they are
-** embedded) and thus do not count as a statement terminator. ^Whitespace
-** and comments that follow the final semicolon are ignored.
-**
-** ^These routines return 0 if the statement is incomplete. ^If a
-** memory allocation fails, then SQLITE_NOMEM is returned.
-**
-** ^These routines do not parse the SQL statements thus
-** will not detect syntactically incorrect SQL.
-**
-** ^(If SQLite has not been initialized using [sqlite3_initialize()] prior
-** to invoking sqlite3_complete16() then sqlite3_initialize() is invoked
-** automatically by sqlite3_complete16(). If that initialization fails,
-** then the return value from sqlite3_complete16() will be non-zero
-** regardless of whether or not the input SQL is complete.)^
-**
-** The input to [sqlite3_complete()] must be a zero-terminated
-** UTF-8 string.
-**
-** The input to [sqlite3_complete16()] must be a zero-terminated
-** UTF-16 string in native byte order.
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_complete(const char *sql);
-SQLITE_API int SQLITE_STDCALL sqlite3_complete16(const void *sql);
-
-/*
-** CAPI3REF: Register A Callback To Handle SQLITE_BUSY Errors
-** KEYWORDS: {busy-handler callback} {busy handler}
-** METHOD: sqlite3
-**
-** ^The sqlite3_busy_handler(D,X,P) routine sets a callback function X
-** that might be invoked with argument P whenever
-** an attempt is made to access a database table associated with
-** [database connection] D when another thread
-** or process has the table locked.
-** The sqlite3_busy_handler() interface is used to implement
-** [sqlite3_busy_timeout()] and [PRAGMA busy_timeout].
-**
-** ^If the busy callback is NULL, then [SQLITE_BUSY]
-** is returned immediately upon encountering the lock. ^If the busy callback
-** is not NULL, then the callback might be invoked with two arguments.
-**
-** ^The first argument to the busy handler is a copy of the void* pointer which
-** is the third argument to sqlite3_busy_handler(). ^The second argument to
-** the busy handler callback is the number of times that the busy handler has
-** been invoked previously for the same locking event. ^If the
-** busy callback returns 0, then no additional attempts are made to
-** access the database and [SQLITE_BUSY] is returned
-** to the application.
-** ^If the callback returns non-zero, then another attempt
-** is made to access the database and the cycle repeats.
-**
-** The presence of a busy handler does not guarantee that it will be invoked
-** when there is lock contention. ^If SQLite determines that invoking the busy
-** handler could result in a deadlock, it will go ahead and return [SQLITE_BUSY]
-** to the application instead of invoking the
-** busy handler.
-** Consider a scenario where one process is holding a read lock that
-** it is trying to promote to a reserved lock and
-** a second process is holding a reserved lock that it is trying
-** to promote to an exclusive lock. The first process cannot proceed
-** because it is blocked by the second and the second process cannot
-** proceed because it is blocked by the first. If both processes
-** invoke the busy handlers, neither will make any progress. Therefore,
-** SQLite returns [SQLITE_BUSY] for the first process, hoping that this
-** will induce the first process to release its read lock and allow
-** the second process to proceed.
-**
-** ^The default busy callback is NULL.
-**
-** ^(There can only be a single busy handler defined for each
-** [database connection]. Setting a new busy handler clears any
-** previously set handler.)^ ^Note that calling [sqlite3_busy_timeout()]
-** or evaluating [PRAGMA busy_timeout=N] will change the
-** busy handler and thus clear any previously set busy handler.
-**
-** The busy callback should not take any actions which modify the
-** database connection that invoked the busy handler. In other words,
-** the busy handler is not reentrant. Any such actions
-** result in undefined behavior.
-**
-** A busy handler must not close the database connection
-** or [prepared statement] that invoked the busy handler.
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_busy_handler(sqlite3*,int(*)(void*,int),void*);
-
-/*
-** CAPI3REF: Set A Busy Timeout
-** METHOD: sqlite3
-**
-** ^This routine sets a [sqlite3_busy_handler | busy handler] that sleeps
-** for a specified amount of time when a table is locked. ^The handler
-** will sleep multiple times until at least "ms" milliseconds of sleeping
-** have accumulated. ^After at least "ms" milliseconds of sleeping,
-** the handler returns 0 which causes [sqlite3_step()] to return
-** [SQLITE_BUSY].
-**
-** ^Calling this routine with an argument less than or equal to zero
-** turns off all busy handlers.
-**
-** ^(There can only be a single busy handler for a particular
-** [database connection] at any given moment. If another busy handler
-** was defined (using [sqlite3_busy_handler()]) prior to calling
-** this routine, that other busy handler is cleared.)^
-**
-** See also: [PRAGMA busy_timeout]
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_busy_timeout(sqlite3*, int ms);
-
-/*
-** CAPI3REF: Convenience Routines For Running Queries
-** METHOD: sqlite3
-**
-** This is a legacy interface that is preserved for backwards compatibility.
-** Use of this interface is not recommended.
-**
-** Definition: A result table is a memory data structure created by the
-** [sqlite3_get_table()] interface. A result table records the
-** complete query results from one or more queries.
-**
-** The table conceptually has a number of rows and columns. But
-** these numbers are not part of the result table itself. These
-** numbers are obtained separately. Let N be the number of rows
-** and M be the number of columns.
-**
-** A result table is an array of pointers to zero-terminated UTF-8 strings.
-** There are (N+1)*M elements in the array. The first M pointers point
-** to zero-terminated strings that contain the names of the columns.
-** The remaining entries all point to query results. NULL values result
-** in NULL pointers. All other values are in their UTF-8 zero-terminated
-** string representation as returned by [sqlite3_column_text()].
-**
-** A result table might consist of one or more memory allocations.
-** It is not safe to pass a result table directly to [sqlite3_free()].
-** A result table should be deallocated using [sqlite3_free_table()].
-**
-** ^(As an example of the result table format, suppose a query result
-** is as follows:
-**
-**
-** Name | Age
-** -----------------------
-** Alice | 43
-** Bob | 28
-** Cindy | 21
-**
-**
-** There are two columns (M==2) and three rows (N==3). Thus the
-** result table has 8 entries. Suppose the result table is stored
-** in an array named azResult. Then azResult holds this content:
-**
-**
-** azResult[0] = "Name";
-** azResult[1] = "Age";
-** azResult[2] = "Alice";
-** azResult[3] = "43";
-** azResult[4] = "Bob";
-** azResult[5] = "28";
-** azResult[6] = "Cindy";
-** azResult[7] = "21";
-** )^
-**
-** ^The sqlite3_get_table() function evaluates one or more
-** semicolon-separated SQL statements in the zero-terminated UTF-8
-** string of its 2nd parameter and returns a result table to the
-** pointer given in its 3rd parameter.
-**
-** After the application has finished with the result from sqlite3_get_table(),
-** it must pass the result table pointer to sqlite3_free_table() in order to
-** release the memory that was malloced. Because of the way the
-** [sqlite3_malloc()] happens within sqlite3_get_table(), the calling
-** function must not try to call [sqlite3_free()] directly. Only
-** [sqlite3_free_table()] is able to release the memory properly and safely.
-**
-** The sqlite3_get_table() interface is implemented as a wrapper around
-** [sqlite3_exec()]. The sqlite3_get_table() routine does not have access
-** to any internal data structures of SQLite. It uses only the public
-** interface defined here. As a consequence, errors that occur in the
-** wrapper layer outside of the internal [sqlite3_exec()] call are not
-** reflected in subsequent calls to [sqlite3_errcode()] or
-** [sqlite3_errmsg()].
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_get_table(
- sqlite3 *db, /* An open database */
- const char *zSql, /* SQL to be evaluated */
- char ***pazResult, /* Results of the query */
- int *pnRow, /* Number of result rows written here */
- int *pnColumn, /* Number of result columns written here */
- char **pzErrmsg /* Error msg written here */
-);
-SQLITE_API void SQLITE_STDCALL sqlite3_free_table(char **result);
-
-/*
-** CAPI3REF: Formatted String Printing Functions
-**
-** These routines are work-alikes of the "printf()" family of functions
-** from the standard C library.
-** These routines understand most of the common K&R formatting options,
-** plus some additional non-standard formats, detailed below.
-** Note that some of the more obscure formatting options from recent
-** C-library standards are omitted from this implementation.
-**
-** ^The sqlite3_mprintf() and sqlite3_vmprintf() routines write their
-** results into memory obtained from [sqlite3_malloc()].
-** The strings returned by these two routines should be
-** released by [sqlite3_free()]. ^Both routines return a
-** NULL pointer if [sqlite3_malloc()] is unable to allocate enough
-** memory to hold the resulting string.
-**
-** ^(The sqlite3_snprintf() routine is similar to "snprintf()" from
-** the standard C library. The result is written into the
-** buffer supplied as the second parameter whose size is given by
-** the first parameter. Note that the order of the
-** first two parameters is reversed from snprintf().)^ This is an
-** historical accident that cannot be fixed without breaking
-** backwards compatibility. ^(Note also that sqlite3_snprintf()
-** returns a pointer to its buffer instead of the number of
-** characters actually written into the buffer.)^ We admit that
-** the number of characters written would be a more useful return
-** value but we cannot change the implementation of sqlite3_snprintf()
-** now without breaking compatibility.
-**
-** ^As long as the buffer size is greater than zero, sqlite3_snprintf()
-** guarantees that the buffer is always zero-terminated. ^The first
-** parameter "n" is the total size of the buffer, including space for
-** the zero terminator. So the longest string that can be completely
-** written will be n-1 characters.
-**
-** ^The sqlite3_vsnprintf() routine is a varargs version of sqlite3_snprintf().
-**
-** These routines all implement some additional formatting
-** options that are useful for constructing SQL statements.
-** All of the usual printf() formatting options apply. In addition, there
-** are "%q", "%Q", "%w" and "%z" options.
-**
-** ^(The %q option works like %s in that it substitutes a nul-terminated
-** string from the argument list. But %q also doubles every '\'' character.
-** %q is designed for use inside a string literal.)^ By doubling each '\''
-** character it escapes that character and allows it to be inserted into
-** the string.
-**
-** For example, assume the string variable zText contains text as follows:
-**
-**
-** char *zText = "It's a happy day!";
-**
-**
-** One can use this text in an SQL statement as follows:
-**
-**
-** char *zSQL = sqlite3_mprintf("INSERT INTO table VALUES('%q')", zText);
-** sqlite3_exec(db, zSQL, 0, 0, 0);
-** sqlite3_free(zSQL);
-**
-**
-** Because the %q format string is used, the '\'' character in zText
-** is escaped and the SQL generated is as follows:
-**
-**
-** INSERT INTO table1 VALUES('It''s a happy day!')
-**
-**
-** This is correct. Had we used %s instead of %q, the generated SQL
-** would have looked like this:
-**
-**
-** INSERT INTO table1 VALUES('It's a happy day!');
-**
-**
-** This second example is an SQL syntax error. As a general rule you should
-** always use %q instead of %s when inserting text into a string literal.
-**
-** ^(The %Q option works like %q except it also adds single quotes around
-** the outside of the total string. Additionally, if the parameter in the
-** argument list is a NULL pointer, %Q substitutes the text "NULL" (without
-** single quotes).)^ So, for example, one could say:
-**
-**
-** char *zSQL = sqlite3_mprintf("INSERT INTO table VALUES(%Q)", zText);
-** sqlite3_exec(db, zSQL, 0, 0, 0);
-** sqlite3_free(zSQL);
-**
-**
-** The code above will render a correct SQL statement in the zSQL
-** variable even if the zText variable is a NULL pointer.
-**
-** ^(The "%w" formatting option is like "%q" except that it expects to
-** be contained within double-quotes instead of single quotes, and it
-** escapes the double-quote character instead of the single-quote
-** character.)^ The "%w" formatting option is intended for safely inserting
-** table and column names into a constructed SQL statement.
-**
-** ^(The "%z" formatting option works like "%s" but with the
-** addition that after the string has been read and copied into
-** the result, [sqlite3_free()] is called on the input string.)^
-*/
-SQLITE_API char *SQLITE_CDECL sqlite3_mprintf(const char*,...);
-SQLITE_API char *SQLITE_STDCALL sqlite3_vmprintf(const char*, va_list);
-SQLITE_API char *SQLITE_CDECL sqlite3_snprintf(int,char*,const char*, ...);
-SQLITE_API char *SQLITE_STDCALL sqlite3_vsnprintf(int,char*,const char*, va_list);
-
-/*
-** CAPI3REF: Memory Allocation Subsystem
-**
-** The SQLite core uses these three routines for all of its own
-** internal memory allocation needs. "Core" in the previous sentence
-** does not include operating-system specific VFS implementation. The
-** Windows VFS uses native malloc() and free() for some operations.
-**
-** ^The sqlite3_malloc() routine returns a pointer to a block
-** of memory at least N bytes in length, where N is the parameter.
-** ^If sqlite3_malloc() is unable to obtain sufficient free
-** memory, it returns a NULL pointer. ^If the parameter N to
-** sqlite3_malloc() is zero or negative then sqlite3_malloc() returns
-** a NULL pointer.
-**
-** ^The sqlite3_malloc64(N) routine works just like
-** sqlite3_malloc(N) except that N is an unsigned 64-bit integer instead
-** of a signed 32-bit integer.
-**
-** ^Calling sqlite3_free() with a pointer previously returned
-** by sqlite3_malloc() or sqlite3_realloc() releases that memory so
-** that it might be reused. ^The sqlite3_free() routine is
-** a no-op if it is called with a NULL pointer. Passing a NULL pointer
-** to sqlite3_free() is harmless. After being freed, memory
-** should neither be read nor written. Even reading previously freed
-** memory might result in a segmentation fault or other severe error.
-** Memory corruption, a segmentation fault, or other severe error
-** might result if sqlite3_free() is called with a non-NULL pointer that
-** was not obtained from sqlite3_malloc() or sqlite3_realloc().
-**
-** ^The sqlite3_realloc(X,N) interface attempts to resize a
-** prior memory allocation X to be at least N bytes.
-** ^If the X parameter to sqlite3_realloc(X,N)
-** is a NULL pointer then its behavior is identical to calling
-** sqlite3_malloc(N).
-** ^If the N parameter to sqlite3_realloc(X,N) is zero or
-** negative then the behavior is exactly the same as calling
-** sqlite3_free(X).
-** ^sqlite3_realloc(X,N) returns a pointer to a memory allocation
-** of at least N bytes in size or NULL if insufficient memory is available.
-** ^If M is the size of the prior allocation, then min(N,M) bytes
-** of the prior allocation are copied into the beginning of buffer returned
-** by sqlite3_realloc(X,N) and the prior allocation is freed.
-** ^If sqlite3_realloc(X,N) returns NULL and N is positive, then the
-** prior allocation is not freed.
-**
-** ^The sqlite3_realloc64(X,N) interface works the same as
-** sqlite3_realloc(X,N) except that N is a 64-bit unsigned integer instead
-** of a 32-bit signed integer.
-**
-** ^If X is a memory allocation previously obtained from sqlite3_malloc(),
-** sqlite3_malloc64(), sqlite3_realloc(), or sqlite3_realloc64(), then
-** sqlite3_msize(X) returns the size of that memory allocation in bytes.
-** ^The value returned by sqlite3_msize(X) might be larger than the number
-** of bytes requested when X was allocated. ^If X is a NULL pointer then
-** sqlite3_msize(X) returns zero. If X points to something that is not
-** the beginning of memory allocation, or if it points to a formerly
-** valid memory allocation that has now been freed, then the behavior
-** of sqlite3_msize(X) is undefined and possibly harmful.
-**
-** ^The memory returned by sqlite3_malloc(), sqlite3_realloc(),
-** sqlite3_malloc64(), and sqlite3_realloc64()
-** is always aligned to at least an 8 byte boundary, or to a
-** 4 byte boundary if the [SQLITE_4_BYTE_ALIGNED_MALLOC] compile-time
-** option is used.
-**
-** In SQLite version 3.5.0 and 3.5.1, it was possible to define
-** the SQLITE_OMIT_MEMORY_ALLOCATION which would cause the built-in
-** implementation of these routines to be omitted. That capability
-** is no longer provided. Only built-in memory allocators can be used.
-**
-** Prior to SQLite version 3.7.10, the Windows OS interface layer called
-** the system malloc() and free() directly when converting
-** filenames between the UTF-8 encoding used by SQLite
-** and whatever filename encoding is used by the particular Windows
-** installation. Memory allocation errors were detected, but
-** they were reported back as [SQLITE_CANTOPEN] or
-** [SQLITE_IOERR] rather than [SQLITE_NOMEM].
-**
-** The pointer arguments to [sqlite3_free()] and [sqlite3_realloc()]
-** must be either NULL or else pointers obtained from a prior
-** invocation of [sqlite3_malloc()] or [sqlite3_realloc()] that have
-** not yet been released.
-**
-** The application must not read or write any part of
-** a block of memory after it has been released using
-** [sqlite3_free()] or [sqlite3_realloc()].
-*/
-SQLITE_API void *SQLITE_STDCALL sqlite3_malloc(int);
-SQLITE_API void *SQLITE_STDCALL sqlite3_malloc64(sqlite3_uint64);
-SQLITE_API void *SQLITE_STDCALL sqlite3_realloc(void*, int);
-SQLITE_API void *SQLITE_STDCALL sqlite3_realloc64(void*, sqlite3_uint64);
-SQLITE_API void SQLITE_STDCALL sqlite3_free(void*);
-SQLITE_API sqlite3_uint64 SQLITE_STDCALL sqlite3_msize(void*);
-
-/*
-** CAPI3REF: Memory Allocator Statistics
-**
-** SQLite provides these two interfaces for reporting on the status
-** of the [sqlite3_malloc()], [sqlite3_free()], and [sqlite3_realloc()]
-** routines, which form the built-in memory allocation subsystem.
-**
-** ^The [sqlite3_memory_used()] routine returns the number of bytes
-** of memory currently outstanding (malloced but not freed).
-** ^The [sqlite3_memory_highwater()] routine returns the maximum
-** value of [sqlite3_memory_used()] since the high-water mark
-** was last reset. ^The values returned by [sqlite3_memory_used()] and
-** [sqlite3_memory_highwater()] include any overhead
-** added by SQLite in its implementation of [sqlite3_malloc()],
-** but not overhead added by any underlying system library
-** routines that [sqlite3_malloc()] may call.
-**
-** ^The memory high-water mark is reset to the current value of
-** [sqlite3_memory_used()] if and only if the parameter to
-** [sqlite3_memory_highwater()] is true. ^The value returned
-** by [sqlite3_memory_highwater(1)] is the high-water mark
-** prior to the reset.
-*/
-SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_memory_used(void);
-SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_memory_highwater(int resetFlag);
-
-/*
-** CAPI3REF: Pseudo-Random Number Generator
-**
-** SQLite contains a high-quality pseudo-random number generator (PRNG) used to
-** select random [ROWID | ROWIDs] when inserting new records into a table that
-** already uses the largest possible [ROWID]. The PRNG is also used for
-** the built-in random() and randomblob() SQL functions. This interface allows
-** applications to access the same PRNG for other purposes.
-**
-** ^A call to this routine stores N bytes of randomness into buffer P.
-** ^The P parameter can be a NULL pointer.
-**
-** ^If this routine has not been previously called or if the previous
-** call had N less than one or a NULL pointer for P, then the PRNG is
-** seeded using randomness obtained from the xRandomness method of
-** the default [sqlite3_vfs] object.
-** ^If the previous call to this routine had an N of 1 or more and a
-** non-NULL P then the pseudo-randomness is generated
-** internally and without recourse to the [sqlite3_vfs] xRandomness
-** method.
-*/
-SQLITE_API void SQLITE_STDCALL sqlite3_randomness(int N, void *P);
-
-/*
-** CAPI3REF: Compile-Time Authorization Callbacks
-** METHOD: sqlite3
-**
-** ^This routine registers an authorizer callback with a particular
-** [database connection], supplied in the first argument.
-** ^The authorizer callback is invoked as SQL statements are being compiled
-** by [sqlite3_prepare()] or its variants [sqlite3_prepare_v2()],
-** [sqlite3_prepare16()] and [sqlite3_prepare16_v2()]. ^At various
-** points during the compilation process, as logic is being created
-** to perform various actions, the authorizer callback is invoked to
-** see if those actions are allowed. ^The authorizer callback should
-** return [SQLITE_OK] to allow the action, [SQLITE_IGNORE] to disallow the
-** specific action but allow the SQL statement to continue to be
-** compiled, or [SQLITE_DENY] to cause the entire SQL statement to be
-** rejected with an error. ^If the authorizer callback returns
-** any value other than [SQLITE_IGNORE], [SQLITE_OK], or [SQLITE_DENY]
-** then the [sqlite3_prepare_v2()] or equivalent call that triggered
-** the authorizer will fail with an error message.
-**
-** When the callback returns [SQLITE_OK], that means the operation
-** requested is ok. ^When the callback returns [SQLITE_DENY], the
-** [sqlite3_prepare_v2()] or equivalent call that triggered the
-** authorizer will fail with an error message explaining that
-** access is denied.
-**
-** ^The first parameter to the authorizer callback is a copy of the third
-** parameter to the sqlite3_set_authorizer() interface. ^The second parameter
-** to the callback is an integer [SQLITE_COPY | action code] that specifies
-** the particular action to be authorized. ^The third through sixth parameters
-** to the callback are zero-terminated strings that contain additional
-** details about the action to be authorized.
-**
-** ^If the action code is [SQLITE_READ]
-** and the callback returns [SQLITE_IGNORE] then the
-** [prepared statement] is constructed to substitute
-** a NULL value in place of the table column that would have
-** been read if [SQLITE_OK] had been returned. The [SQLITE_IGNORE]
-** return can be used to deny an untrusted user access to individual
-** columns of a table.
-** ^If the action code is [SQLITE_DELETE] and the callback returns
-** [SQLITE_IGNORE] then the [DELETE] operation proceeds but the
-** [truncate optimization] is disabled and all rows are deleted individually.
-**
-** An authorizer is used when [sqlite3_prepare | preparing]
-** SQL statements from an untrusted source, to ensure that the SQL statements
-** do not try to access data they are not allowed to see, or that they do not
-** try to execute malicious statements that damage the database. For
-** example, an application may allow a user to enter arbitrary
-** SQL queries for evaluation by a database. But the application does
-** not want the user to be able to make arbitrary changes to the
-** database. An authorizer could then be put in place while the
-** user-entered SQL is being [sqlite3_prepare | prepared] that
-** disallows everything except [SELECT] statements.
-**
-** Applications that need to process SQL from untrusted sources
-** might also consider lowering resource limits using [sqlite3_limit()]
-** and limiting database size using the [max_page_count] [PRAGMA]
-** in addition to using an authorizer.
-**
-** ^(Only a single authorizer can be in place on a database connection
-** at a time. Each call to sqlite3_set_authorizer overrides the
-** previous call.)^ ^Disable the authorizer by installing a NULL callback.
-** The authorizer is disabled by default.
-**
-** The authorizer callback must not do anything that will modify
-** the database connection that invoked the authorizer callback.
-** Note that [sqlite3_prepare_v2()] and [sqlite3_step()] both modify their
-** database connections for the meaning of "modify" in this paragraph.
-**
-** ^When [sqlite3_prepare_v2()] is used to prepare a statement, the
-** statement might be re-prepared during [sqlite3_step()] due to a
-** schema change. Hence, the application should ensure that the
-** correct authorizer callback remains in place during the [sqlite3_step()].
-**
-** ^Note that the authorizer callback is invoked only during
-** [sqlite3_prepare()] or its variants. Authorization is not
-** performed during statement evaluation in [sqlite3_step()], unless
-** as stated in the previous paragraph, sqlite3_step() invokes
-** sqlite3_prepare_v2() to reprepare a statement after a schema change.
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_set_authorizer(
- sqlite3*,
- int (*xAuth)(void*,int,const char*,const char*,const char*,const char*),
- void *pUserData
-);
-
-/*
-** CAPI3REF: Authorizer Return Codes
-**
-** The [sqlite3_set_authorizer | authorizer callback function] must
-** return either [SQLITE_OK] or one of these two constants in order
-** to signal SQLite whether or not the action is permitted. See the
-** [sqlite3_set_authorizer | authorizer documentation] for additional
-** information.
-**
-** Note that SQLITE_IGNORE is also used as a [conflict resolution mode]
-** returned from the [sqlite3_vtab_on_conflict()] interface.
-*/
-#define SQLITE_DENY 1 /* Abort the SQL statement with an error */
-#define SQLITE_IGNORE 2 /* Don't allow access, but don't generate an error */
-
-/*
-** CAPI3REF: Authorizer Action Codes
-**
-** The [sqlite3_set_authorizer()] interface registers a callback function
-** that is invoked to authorize certain SQL statement actions. The
-** second parameter to the callback is an integer code that specifies
-** what action is being authorized. These are the integer action codes that
-** the authorizer callback may be passed.
-**
-** These action code values signify what kind of operation is to be
-** authorized. The 3rd and 4th parameters to the authorization
-** callback function will be parameters or NULL depending on which of these
-** codes is used as the second parameter. ^(The 5th parameter to the
-** authorizer callback is the name of the database ("main", "temp",
-** etc.) if applicable.)^ ^The 6th parameter to the authorizer callback
-** is the name of the inner-most trigger or view that is responsible for
-** the access attempt or NULL if this access attempt is directly from
-** top-level SQL code.
-*/
-/******************************************* 3rd ************ 4th ***********/
-#define SQLITE_CREATE_INDEX 1 /* Index Name Table Name */
-#define SQLITE_CREATE_TABLE 2 /* Table Name NULL */
-#define SQLITE_CREATE_TEMP_INDEX 3 /* Index Name Table Name */
-#define SQLITE_CREATE_TEMP_TABLE 4 /* Table Name NULL */
-#define SQLITE_CREATE_TEMP_TRIGGER 5 /* Trigger Name Table Name */
-#define SQLITE_CREATE_TEMP_VIEW 6 /* View Name NULL */
-#define SQLITE_CREATE_TRIGGER 7 /* Trigger Name Table Name */
-#define SQLITE_CREATE_VIEW 8 /* View Name NULL */
-#define SQLITE_DELETE 9 /* Table Name NULL */
-#define SQLITE_DROP_INDEX 10 /* Index Name Table Name */
-#define SQLITE_DROP_TABLE 11 /* Table Name NULL */
-#define SQLITE_DROP_TEMP_INDEX 12 /* Index Name Table Name */
-#define SQLITE_DROP_TEMP_TABLE 13 /* Table Name NULL */
-#define SQLITE_DROP_TEMP_TRIGGER 14 /* Trigger Name Table Name */
-#define SQLITE_DROP_TEMP_VIEW 15 /* View Name NULL */
-#define SQLITE_DROP_TRIGGER 16 /* Trigger Name Table Name */
-#define SQLITE_DROP_VIEW 17 /* View Name NULL */
-#define SQLITE_INSERT 18 /* Table Name NULL */
-#define SQLITE_PRAGMA 19 /* Pragma Name 1st arg or NULL */
-#define SQLITE_READ 20 /* Table Name Column Name */
-#define SQLITE_SELECT 21 /* NULL NULL */
-#define SQLITE_TRANSACTION 22 /* Operation NULL */
-#define SQLITE_UPDATE 23 /* Table Name Column Name */
-#define SQLITE_ATTACH 24 /* Filename NULL */
-#define SQLITE_DETACH 25 /* Database Name NULL */
-#define SQLITE_ALTER_TABLE 26 /* Database Name Table Name */
-#define SQLITE_REINDEX 27 /* Index Name NULL */
-#define SQLITE_ANALYZE 28 /* Table Name NULL */
-#define SQLITE_CREATE_VTABLE 29 /* Table Name Module Name */
-#define SQLITE_DROP_VTABLE 30 /* Table Name Module Name */
-#define SQLITE_FUNCTION 31 /* NULL Function Name */
-#define SQLITE_SAVEPOINT 32 /* Operation Savepoint Name */
-#define SQLITE_COPY 0 /* No longer used */
-#define SQLITE_RECURSIVE 33 /* NULL NULL */
-
-/*
-** CAPI3REF: Tracing And Profiling Functions
-** METHOD: sqlite3
-**
-** These routines are deprecated. Use the [sqlite3_trace_v2()] interface
-** instead of the routines described here.
-**
-** These routines register callback functions that can be used for
-** tracing and profiling the execution of SQL statements.
-**
-** ^The callback function registered by sqlite3_trace() is invoked at
-** various times when an SQL statement is being run by [sqlite3_step()].
-** ^The sqlite3_trace() callback is invoked with a UTF-8 rendering of the
-** SQL statement text as the statement first begins executing.
-** ^(Additional sqlite3_trace() callbacks might occur
-** as each triggered subprogram is entered. The callbacks for triggers
-** contain a UTF-8 SQL comment that identifies the trigger.)^
-**
-** The [SQLITE_TRACE_SIZE_LIMIT] compile-time option can be used to limit
-** the length of [bound parameter] expansion in the output of sqlite3_trace().
-**
-** ^The callback function registered by sqlite3_profile() is invoked
-** as each SQL statement finishes. ^The profile callback contains
-** the original statement text and an estimate of wall-clock time
-** of how long that statement took to run. ^The profile callback
-** time is in units of nanoseconds, however the current implementation
-** is only capable of millisecond resolution so the six least significant
-** digits in the time are meaningless. Future versions of SQLite
-** might provide greater resolution on the profiler callback. The
-** sqlite3_profile() function is considered experimental and is
-** subject to change in future versions of SQLite.
-*/
-SQLITE_API SQLITE_DEPRECATED void *SQLITE_STDCALL sqlite3_trace(sqlite3*,
- void(*xTrace)(void*,const char*), void*);
-SQLITE_API SQLITE_DEPRECATED void *SQLITE_STDCALL sqlite3_profile(sqlite3*,
- void(*xProfile)(void*,const char*,sqlite3_uint64), void*);
-
-/*
-** CAPI3REF: SQL Trace Event Codes
-** KEYWORDS: SQLITE_TRACE
-**
-** These constants identify classes of events that can be monitored
-** using the [sqlite3_trace_v2()] tracing logic. The third argument
-** to [sqlite3_trace_v2()] is an OR-ed combination of one or more of
-** the following constants. ^The first argument to the trace callback
-** is one of the following constants.
-**
-** New tracing constants may be added in future releases.
-**
-** ^A trace callback has four arguments: xCallback(T,C,P,X).
-** ^The T argument is one of the integer type codes above.
-** ^The C argument is a copy of the context pointer passed in as the
-** fourth argument to [sqlite3_trace_v2()].
-** The P and X arguments are pointers whose meanings depend on T.
-**
-**
-** [[SQLITE_TRACE_STMT]] SQLITE_TRACE_STMT
-** ^An SQLITE_TRACE_STMT callback is invoked when a prepared statement
-** first begins running and possibly at other times during the
-** execution of the prepared statement, such as at the start of each
-** trigger subprogram. ^The P argument is a pointer to the
-** [prepared statement]. ^The X argument is a pointer to a string which
-** is the unexpanded SQL text of the prepared statement or an SQL comment
-** that indicates the invocation of a trigger. ^The callback can compute
-** the same text that would have been returned by the legacy [sqlite3_trace()]
-** interface by using the X argument when X begins with "--" and invoking
-** [sqlite3_expanded_sql(P)] otherwise.
-**
-** [[SQLITE_TRACE_PROFILE]] SQLITE_TRACE_PROFILE
-** ^An SQLITE_TRACE_PROFILE callback provides approximately the same
-** information as is provided by the [sqlite3_profile()] callback.
-** ^The P argument is a pointer to the [prepared statement] and the
-** X argument points to a 64-bit integer which is an estimate of
-** the number of nanoseconds that the prepared statement took to run.
-** ^The SQLITE_TRACE_PROFILE callback is invoked when the statement finishes.
-**
-** [[SQLITE_TRACE_ROW]] SQLITE_TRACE_ROW
-** ^An SQLITE_TRACE_ROW callback is invoked whenever a prepared
-** statement generates a single row of result.
-** ^The P argument is a pointer to the [prepared statement] and the
-** X argument is unused.
-**
-** [[SQLITE_TRACE_CLOSE]] SQLITE_TRACE_CLOSE
-** ^An SQLITE_TRACE_CLOSE callback is invoked when a database
-** connection closes.
-** ^The P argument is a pointer to the [database connection] object
-** and the X argument is unused.
-**
-*/
-#define SQLITE_TRACE_STMT 0x01
-#define SQLITE_TRACE_PROFILE 0x02
-#define SQLITE_TRACE_ROW 0x04
-#define SQLITE_TRACE_CLOSE 0x08
-
-/*
-** CAPI3REF: SQL Trace Hook
-** METHOD: sqlite3
-**
-** ^The sqlite3_trace_v2(D,M,X,P) interface registers a trace callback
-** function X against [database connection] D, using property mask M
-** and context pointer P. ^If the X callback is
-** NULL or if the M mask is zero, then tracing is disabled. The
-** M argument should be the bitwise OR-ed combination of
-** zero or more [SQLITE_TRACE] constants.
-**
-** ^Each call to either sqlite3_trace() or sqlite3_trace_v2() overrides
-** (cancels) any prior calls to sqlite3_trace() or sqlite3_trace_v2().
-**
-** ^The X callback is invoked whenever any of the events identified by
-** mask M occur. ^The integer return value from the callback is currently
-** ignored, though this may change in future releases. Callback
-** implementations should return zero to ensure future compatibility.
-**
-** ^A trace callback is invoked with four arguments: callback(T,C,P,X).
-** ^The T argument is one of the [SQLITE_TRACE]
-** constants to indicate why the callback was invoked.
-** ^The C argument is a copy of the context pointer.
-** The P and X arguments are pointers whose meanings depend on T.
-**
-** The sqlite3_trace_v2() interface is intended to replace the legacy
-** interfaces [sqlite3_trace()] and [sqlite3_profile()], both of which
-** are deprecated.
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_trace_v2(
- sqlite3*,
- unsigned uMask,
- int(*xCallback)(unsigned,void*,void*,void*),
- void *pCtx
-);
-
-/*
-** CAPI3REF: Query Progress Callbacks
-** METHOD: sqlite3
-**
-** ^The sqlite3_progress_handler(D,N,X,P) interface causes the callback
-** function X to be invoked periodically during long running calls to
-** [sqlite3_exec()], [sqlite3_step()] and [sqlite3_get_table()] for
-** database connection D. An example use for this
-** interface is to keep a GUI updated during a large query.
-**
-** ^The parameter P is passed through as the only parameter to the
-** callback function X. ^The parameter N is the approximate number of
-** [virtual machine instructions] that are evaluated between successive
-** invocations of the callback X. ^If N is less than one then the progress
-** handler is disabled.
-**
-** ^Only a single progress handler may be defined at one time per
-** [database connection]; setting a new progress handler cancels the
-** old one. ^Setting parameter X to NULL disables the progress handler.
-** ^The progress handler is also disabled by setting N to a value less
-** than 1.
-**
-** ^If the progress callback returns non-zero, the operation is
-** interrupted. This feature can be used to implement a
-** "Cancel" button on a GUI progress dialog box.
-**
-** The progress handler callback must not do anything that will modify
-** the database connection that invoked the progress handler.
-** Note that [sqlite3_prepare_v2()] and [sqlite3_step()] both modify their
-** database connections for the meaning of "modify" in this paragraph.
-**
-*/
-SQLITE_API void SQLITE_STDCALL sqlite3_progress_handler(sqlite3*, int, int(*)(void*), void*);
-
-/*
-** CAPI3REF: Opening A New Database Connection
-** CONSTRUCTOR: sqlite3
-**
-** ^These routines open an SQLite database file as specified by the
-** filename argument. ^The filename argument is interpreted as UTF-8 for
-** sqlite3_open() and sqlite3_open_v2() and as UTF-16 in the native byte
-** order for sqlite3_open16(). ^(A [database connection] handle is usually
-** returned in *ppDb, even if an error occurs. The only exception is that
-** if SQLite is unable to allocate memory to hold the [sqlite3] object,
-** a NULL will be written into *ppDb instead of a pointer to the [sqlite3]
-** object.)^ ^(If the database is opened (and/or created) successfully, then
-** [SQLITE_OK] is returned. Otherwise an [error code] is returned.)^ ^The
-** [sqlite3_errmsg()] or [sqlite3_errmsg16()] routines can be used to obtain
-** an English language description of the error following a failure of any
-** of the sqlite3_open() routines.
-**
-** ^The default encoding will be UTF-8 for databases created using
-** sqlite3_open() or sqlite3_open_v2(). ^The default encoding for databases
-** created using sqlite3_open16() will be UTF-16 in the native byte order.
-**
-** Whether or not an error occurs when it is opened, resources
-** associated with the [database connection] handle should be released by
-** passing it to [sqlite3_close()] when it is no longer required.
-**
-** The sqlite3_open_v2() interface works like sqlite3_open()
-** except that it accepts two additional parameters for additional control
-** over the new database connection. ^(The flags parameter to
-** sqlite3_open_v2() can take one of
-** the following three values, optionally combined with the
-** [SQLITE_OPEN_NOMUTEX], [SQLITE_OPEN_FULLMUTEX], [SQLITE_OPEN_SHAREDCACHE],
-** [SQLITE_OPEN_PRIVATECACHE], and/or [SQLITE_OPEN_URI] flags:)^
-**
-**
-** ^([SQLITE_OPEN_READONLY]
-** The database is opened in read-only mode. If the database does not
-** already exist, an error is returned. )^
-**
-** ^([SQLITE_OPEN_READWRITE]
-** The database is opened for reading and writing if possible, or reading
-** only if the file is write protected by the operating system. In either
-** case the database must already exist, otherwise an error is returned. )^
-**
-** ^([SQLITE_OPEN_READWRITE] | [SQLITE_OPEN_CREATE]
-** The database is opened for reading and writing, and is created if
-** it does not already exist. This is the behavior that is always used for
-** sqlite3_open() and sqlite3_open16(). )^
-**
-**
-** If the 3rd parameter to sqlite3_open_v2() is not one of the
-** combinations shown above optionally combined with other
-** [SQLITE_OPEN_READONLY | SQLITE_OPEN_* bits]
-** then the behavior is undefined.
-**
-** ^If the [SQLITE_OPEN_NOMUTEX] flag is set, then the database connection
-** opens in the multi-thread [threading mode] as long as the single-thread
-** mode has not been set at compile-time or start-time. ^If the
-** [SQLITE_OPEN_FULLMUTEX] flag is set then the database connection opens
-** in the serialized [threading mode] unless single-thread was
-** previously selected at compile-time or start-time.
-** ^The [SQLITE_OPEN_SHAREDCACHE] flag causes the database connection to be
-** eligible to use [shared cache mode], regardless of whether or not shared
-** cache is enabled using [sqlite3_enable_shared_cache()]. ^The
-** [SQLITE_OPEN_PRIVATECACHE] flag causes the database connection to not
-** participate in [shared cache mode] even if it is enabled.
-**
-** ^The fourth parameter to sqlite3_open_v2() is the name of the
-** [sqlite3_vfs] object that defines the operating system interface that
-** the new database connection should use. ^If the fourth parameter is
-** a NULL pointer then the default [sqlite3_vfs] object is used.
-**
-** ^If the filename is ":memory:", then a private, temporary in-memory database
-** is created for the connection. ^This in-memory database will vanish when
-** the database connection is closed. Future versions of SQLite might
-** make use of additional special filenames that begin with the ":" character.
-** It is recommended that when a database filename actually does begin with
-** a ":" character you should prefix the filename with a pathname such as
-** "./" to avoid ambiguity.
-**
-** ^If the filename is an empty string, then a private, temporary
-** on-disk database will be created. ^This private database will be
-** automatically deleted as soon as the database connection is closed.
-**
-** [[URI filenames in sqlite3_open()]] URI Filenames
-**
-** ^If [URI filename] interpretation is enabled, and the filename argument
-** begins with "file:", then the filename is interpreted as a URI. ^URI
-** filename interpretation is enabled if the [SQLITE_OPEN_URI] flag is
-** set in the fourth argument to sqlite3_open_v2(), or if it has
-** been enabled globally using the [SQLITE_CONFIG_URI] option with the
-** [sqlite3_config()] method or by the [SQLITE_USE_URI] compile-time option.
-** As of SQLite version 3.7.7, URI filename interpretation is turned off
-** by default, but future releases of SQLite might enable URI filename
-** interpretation by default. See "[URI filenames]" for additional
-** information.
-**
-** URI filenames are parsed according to RFC 3986. ^If the URI contains an
-** authority, then it must be either an empty string or the string
-** "localhost". ^If the authority is not an empty string or "localhost", an
-** error is returned to the caller. ^The fragment component of a URI, if
-** present, is ignored.
-**
-** ^SQLite uses the path component of the URI as the name of the disk file
-** which contains the database. ^If the path begins with a '/' character,
-** then it is interpreted as an absolute path. ^If the path does not begin
-** with a '/' (meaning that the authority section is omitted from the URI)
-** then the path is interpreted as a relative path.
-** ^(On windows, the first component of an absolute path
-** is a drive specification (e.g. "C:").)^
-**
-** [[core URI query parameters]]
-** The query component of a URI may contain parameters that are interpreted
-** either by SQLite itself, or by a [VFS | custom VFS implementation].
-** SQLite and its built-in [VFSes] interpret the
-** following query parameters:
-**
-**
-** vfs : ^The "vfs" parameter may be used to specify the name of
-** a VFS object that provides the operating system interface that should
-** be used to access the database file on disk. ^If this option is set to
-** an empty string the default VFS object is used. ^Specifying an unknown
-** VFS is an error. ^If sqlite3_open_v2() is used and the vfs option is
-** present, then the VFS specified by the option takes precedence over
-** the value passed as the fourth parameter to sqlite3_open_v2().
-**
-** mode : ^(The mode parameter may be set to either "ro", "rw",
-** "rwc", or "memory". Attempting to set it to any other value is
-** an error)^.
-** ^If "ro" is specified, then the database is opened for read-only
-** access, just as if the [SQLITE_OPEN_READONLY] flag had been set in the
-** third argument to sqlite3_open_v2(). ^If the mode option is set to
-** "rw", then the database is opened for read-write (but not create)
-** access, as if SQLITE_OPEN_READWRITE (but not SQLITE_OPEN_CREATE) had
-** been set. ^Value "rwc" is equivalent to setting both
-** SQLITE_OPEN_READWRITE and SQLITE_OPEN_CREATE. ^If the mode option is
-** set to "memory" then a pure [in-memory database] that never reads
-** or writes from disk is used. ^It is an error to specify a value for
-** the mode parameter that is less restrictive than that specified by
-** the flags passed in the third parameter to sqlite3_open_v2().
-**
-** cache : ^The cache parameter may be set to either "shared" or
-** "private". ^Setting it to "shared" is equivalent to setting the
-** SQLITE_OPEN_SHAREDCACHE bit in the flags argument passed to
-** sqlite3_open_v2(). ^Setting the cache parameter to "private" is
-** equivalent to setting the SQLITE_OPEN_PRIVATECACHE bit.
-** ^If sqlite3_open_v2() is used and the "cache" parameter is present in
-** a URI filename, its value overrides any behavior requested by setting
-** SQLITE_OPEN_PRIVATECACHE or SQLITE_OPEN_SHAREDCACHE flag.
-**
-** psow : ^The psow parameter indicates whether or not the
-** [powersafe overwrite] property does or does not apply to the
-** storage media on which the database file resides.
-**
-** nolock : ^The nolock parameter is a boolean query parameter
-** which if set disables file locking in rollback journal modes. This
-** is useful for accessing a database on a filesystem that does not
-** support locking. Caution: Database corruption might result if two
-** or more processes write to the same database and any one of those
-** processes uses nolock=1.
-**
-** immutable : ^The immutable parameter is a boolean query
-** parameter that indicates that the database file is stored on
-** read-only media. ^When immutable is set, SQLite assumes that the
-** database file cannot be changed, even by a process with higher
-** privilege, and so the database is opened read-only and all locking
-** and change detection is disabled. Caution: Setting the immutable
-** property on a database file that does in fact change can result
-** in incorrect query results and/or [SQLITE_CORRUPT] errors.
-** See also: [SQLITE_IOCAP_IMMUTABLE].
-**
-**
-**
-** ^Specifying an unknown parameter in the query component of a URI is not an
-** error. Future versions of SQLite might understand additional query
-** parameters. See "[query parameters with special meaning to SQLite]" for
-** additional information.
-**
-** [[URI filename examples]] URI filename examples
-**
-**
-** URI filenames Results
-** file:data.db
-** Open the file "data.db" in the current directory.
-** file:/home/fred/data.db
-** file:///home/fred/data.db
-** file://localhost/home/fred/data.db
-** Open the database file "/home/fred/data.db".
-** file://darkstar/home/fred/data.db
-** An error. "darkstar" is not a recognized authority.
-**
-** file:///C:/Documents%20and%20Settings/fred/Desktop/data.db
-** Windows only: Open the file "data.db" on fred's desktop on drive
-** C:. Note that the %20 escaping in this example is not strictly
-** necessary - space characters can be used literally
-** in URI filenames.
-** file:data.db?mode=ro&cache=private
-** Open file "data.db" in the current directory for read-only access.
-** Regardless of whether or not shared-cache mode is enabled by
-** default, use a private cache.
-** file:/home/fred/data.db?vfs=unix-dotfile
-** Open file "/home/fred/data.db". Use the special VFS "unix-dotfile"
-** that uses dot-files in place of posix advisory locking.
-** file:data.db?mode=readonly
-** An error. "readonly" is not a valid option for the "mode" parameter.
-**
-**
-** ^URI hexadecimal escape sequences (%HH) are supported within the path and
-** query components of a URI. A hexadecimal escape sequence consists of a
-** percent sign - "%" - followed by exactly two hexadecimal digits
-** specifying an octet value. ^Before the path or query components of a
-** URI filename are interpreted, they are encoded using UTF-8 and all
-** hexadecimal escape sequences replaced by a single byte containing the
-** corresponding octet. If this process generates an invalid UTF-8 encoding,
-** the results are undefined.
-**
-** Note to Windows users: The encoding used for the filename argument
-** of sqlite3_open() and sqlite3_open_v2() must be UTF-8, not whatever
-** codepage is currently defined. Filenames containing international
-** characters must be converted to UTF-8 prior to passing them into
-** sqlite3_open() or sqlite3_open_v2().
-**
-** Note to Windows Runtime users: The temporary directory must be set
-** prior to calling sqlite3_open() or sqlite3_open_v2(). Otherwise, various
-** features that require the use of temporary files may fail.
-**
-** See also: [sqlite3_temp_directory]
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_open(
- const char *filename, /* Database filename (UTF-8) */
- sqlite3 **ppDb /* OUT: SQLite db handle */
-);
-SQLITE_API int SQLITE_STDCALL sqlite3_open16(
- const void *filename, /* Database filename (UTF-16) */
- sqlite3 **ppDb /* OUT: SQLite db handle */
-);
-SQLITE_API int SQLITE_STDCALL sqlite3_open_v2(
- const char *filename, /* Database filename (UTF-8) */
- sqlite3 **ppDb, /* OUT: SQLite db handle */
- int flags, /* Flags */
- const char *zVfs /* Name of VFS module to use */
-);
-
-/*
-** CAPI3REF: Obtain Values For URI Parameters
-**
-** These are utility routines, useful to VFS implementations, that check
-** to see if a database file was a URI that contained a specific query
-** parameter, and if so obtains the value of that query parameter.
-**
-** If F is the database filename pointer passed into the xOpen() method of
-** a VFS implementation when the flags parameter to xOpen() has one or
-** more of the [SQLITE_OPEN_URI] or [SQLITE_OPEN_MAIN_DB] bits set and
-** P is the name of the query parameter, then
-** sqlite3_uri_parameter(F,P) returns the value of the P
-** parameter if it exists or a NULL pointer if P does not appear as a
-** query parameter on F. If P is a query parameter of F
-** has no explicit value, then sqlite3_uri_parameter(F,P) returns
-** a pointer to an empty string.
-**
-** The sqlite3_uri_boolean(F,P,B) routine assumes that P is a boolean
-** parameter and returns true (1) or false (0) according to the value
-** of P. The sqlite3_uri_boolean(F,P,B) routine returns true (1) if the
-** value of query parameter P is one of "yes", "true", or "on" in any
-** case or if the value begins with a non-zero number. The
-** sqlite3_uri_boolean(F,P,B) routines returns false (0) if the value of
-** query parameter P is one of "no", "false", or "off" in any case or
-** if the value begins with a numeric zero. If P is not a query
-** parameter on F or if the value of P is does not match any of the
-** above, then sqlite3_uri_boolean(F,P,B) returns (B!=0).
-**
-** The sqlite3_uri_int64(F,P,D) routine converts the value of P into a
-** 64-bit signed integer and returns that integer, or D if P does not
-** exist. If the value of P is something other than an integer, then
-** zero is returned.
-**
-** If F is a NULL pointer, then sqlite3_uri_parameter(F,P) returns NULL and
-** sqlite3_uri_boolean(F,P,B) returns B. If F is not a NULL pointer and
-** is not a database file pathname pointer that SQLite passed into the xOpen
-** VFS method, then the behavior of this routine is undefined and probably
-** undesirable.
-*/
-SQLITE_API const char *SQLITE_STDCALL sqlite3_uri_parameter(const char *zFilename, const char *zParam);
-SQLITE_API int SQLITE_STDCALL sqlite3_uri_boolean(const char *zFile, const char *zParam, int bDefault);
-SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_uri_int64(const char*, const char*, sqlite3_int64);
-
-
-/*
-** CAPI3REF: Error Codes And Messages
-** METHOD: sqlite3
-**
-** ^If the most recent sqlite3_* API call associated with
-** [database connection] D failed, then the sqlite3_errcode(D) interface
-** returns the numeric [result code] or [extended result code] for that
-** API call.
-** If the most recent API call was successful,
-** then the return value from sqlite3_errcode() is undefined.
-** ^The sqlite3_extended_errcode()
-** interface is the same except that it always returns the
-** [extended result code] even when extended result codes are
-** disabled.
-**
-** ^The sqlite3_errmsg() and sqlite3_errmsg16() return English-language
-** text that describes the error, as either UTF-8 or UTF-16 respectively.
-** ^(Memory to hold the error message string is managed internally.
-** The application does not need to worry about freeing the result.
-** However, the error string might be overwritten or deallocated by
-** subsequent calls to other SQLite interface functions.)^
-**
-** ^The sqlite3_errstr() interface returns the English-language text
-** that describes the [result code], as UTF-8.
-** ^(Memory to hold the error message string is managed internally
-** and must not be freed by the application)^.
-**
-** When the serialized [threading mode] is in use, it might be the
-** case that a second error occurs on a separate thread in between
-** the time of the first error and the call to these interfaces.
-** When that happens, the second error will be reported since these
-** interfaces always report the most recent result. To avoid
-** this, each thread can obtain exclusive use of the [database connection] D
-** by invoking [sqlite3_mutex_enter]([sqlite3_db_mutex](D)) before beginning
-** to use D and invoking [sqlite3_mutex_leave]([sqlite3_db_mutex](D)) after
-** all calls to the interfaces listed here are completed.
-**
-** If an interface fails with SQLITE_MISUSE, that means the interface
-** was invoked incorrectly by the application. In that case, the
-** error code and message may or may not be set.
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_errcode(sqlite3 *db);
-SQLITE_API int SQLITE_STDCALL sqlite3_extended_errcode(sqlite3 *db);
-SQLITE_API const char *SQLITE_STDCALL sqlite3_errmsg(sqlite3*);
-SQLITE_API const void *SQLITE_STDCALL sqlite3_errmsg16(sqlite3*);
-SQLITE_API const char *SQLITE_STDCALL sqlite3_errstr(int);
-
-/*
-** CAPI3REF: Prepared Statement Object
-** KEYWORDS: {prepared statement} {prepared statements}
-**
-** An instance of this object represents a single SQL statement that
-** has been compiled into binary form and is ready to be evaluated.
-**
-** Think of each SQL statement as a separate computer program. The
-** original SQL text is source code. A prepared statement object
-** is the compiled object code. All SQL must be converted into a
-** prepared statement before it can be run.
-**
-** The life-cycle of a prepared statement object usually goes like this:
-**
-**
-** Create the prepared statement object using [sqlite3_prepare_v2()].
-** Bind values to [parameters] using the sqlite3_bind_*()
-** interfaces.
-** Run the SQL by calling [sqlite3_step()] one or more times.
-** Reset the prepared statement using [sqlite3_reset()] then go back
-** to step 2. Do this zero or more times.
-** Destroy the object using [sqlite3_finalize()].
-**
-*/
-typedef struct sqlite3_stmt sqlite3_stmt;
-
-/*
-** CAPI3REF: Run-time Limits
-** METHOD: sqlite3
-**
-** ^(This interface allows the size of various constructs to be limited
-** on a connection by connection basis. The first parameter is the
-** [database connection] whose limit is to be set or queried. The
-** second parameter is one of the [limit categories] that define a
-** class of constructs to be size limited. The third parameter is the
-** new limit for that construct.)^
-**
-** ^If the new limit is a negative number, the limit is unchanged.
-** ^(For each limit category SQLITE_LIMIT_NAME there is a
-** [limits | hard upper bound]
-** set at compile-time by a C preprocessor macro called
-** [limits | SQLITE_MAX_NAME ].
-** (The "_LIMIT_" in the name is changed to "_MAX_".))^
-** ^Attempts to increase a limit above its hard upper bound are
-** silently truncated to the hard upper bound.
-**
-** ^Regardless of whether or not the limit was changed, the
-** [sqlite3_limit()] interface returns the prior value of the limit.
-** ^Hence, to find the current value of a limit without changing it,
-** simply invoke this interface with the third parameter set to -1.
-**
-** Run-time limits are intended for use in applications that manage
-** both their own internal database and also databases that are controlled
-** by untrusted external sources. An example application might be a
-** web browser that has its own databases for storing history and
-** separate databases controlled by JavaScript applications downloaded
-** off the Internet. The internal databases can be given the
-** large, default limits. Databases managed by external sources can
-** be given much smaller limits designed to prevent a denial of service
-** attack. Developers might also want to use the [sqlite3_set_authorizer()]
-** interface to further control untrusted SQL. The size of the database
-** created by an untrusted script can be contained using the
-** [max_page_count] [PRAGMA].
-**
-** New run-time limit categories may be added in future releases.
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_limit(sqlite3*, int id, int newVal);
-
-/*
-** CAPI3REF: Run-Time Limit Categories
-** KEYWORDS: {limit category} {*limit categories}
-**
-** These constants define various performance limits
-** that can be lowered at run-time using [sqlite3_limit()].
-** The synopsis of the meanings of the various limits is shown below.
-** Additional information is available at [limits | Limits in SQLite].
-**
-**
-** [[SQLITE_LIMIT_LENGTH]] ^(SQLITE_LIMIT_LENGTH
-** The maximum size of any string or BLOB or table row, in bytes.)^
-**
-** [[SQLITE_LIMIT_SQL_LENGTH]] ^( SQLITE_LIMIT_SQL_LENGTH
-** The maximum length of an SQL statement, in bytes. )^
-**
-** [[SQLITE_LIMIT_COLUMN]] ^(SQLITE_LIMIT_COLUMN
-** The maximum number of columns in a table definition or in the
-** result set of a [SELECT] or the maximum number of columns in an index
-** or in an ORDER BY or GROUP BY clause. )^
-**
-** [[SQLITE_LIMIT_EXPR_DEPTH]] ^(SQLITE_LIMIT_EXPR_DEPTH
-** The maximum depth of the parse tree on any expression. )^
-**
-** [[SQLITE_LIMIT_COMPOUND_SELECT]] ^(SQLITE_LIMIT_COMPOUND_SELECT
-** The maximum number of terms in a compound SELECT statement. )^
-**
-** [[SQLITE_LIMIT_VDBE_OP]] ^(SQLITE_LIMIT_VDBE_OP
-** The maximum number of instructions in a virtual machine program
-** used to implement an SQL statement. This limit is not currently
-** enforced, though that might be added in some future release of
-** SQLite. )^
-**
-** [[SQLITE_LIMIT_FUNCTION_ARG]] ^(SQLITE_LIMIT_FUNCTION_ARG
-** The maximum number of arguments on a function. )^
-**
-** [[SQLITE_LIMIT_ATTACHED]] ^(SQLITE_LIMIT_ATTACHED
-** The maximum number of [ATTACH | attached databases].)^
-**
-** [[SQLITE_LIMIT_LIKE_PATTERN_LENGTH]]
-** ^(SQLITE_LIMIT_LIKE_PATTERN_LENGTH
-** The maximum length of the pattern argument to the [LIKE] or
-** [GLOB] operators. )^
-**
-** [[SQLITE_LIMIT_VARIABLE_NUMBER]]
-** ^(SQLITE_LIMIT_VARIABLE_NUMBER
-** The maximum index number of any [parameter] in an SQL statement.)^
-**
-** [[SQLITE_LIMIT_TRIGGER_DEPTH]] ^( SQLITE_LIMIT_TRIGGER_DEPTH
-** The maximum depth of recursion for triggers. )^
-**
-** [[SQLITE_LIMIT_WORKER_THREADS]] ^(SQLITE_LIMIT_WORKER_THREADS
-** The maximum number of auxiliary worker threads that a single
-** [prepared statement] may start. )^
-**
-*/
-#define SQLITE_LIMIT_LENGTH 0
-#define SQLITE_LIMIT_SQL_LENGTH 1
-#define SQLITE_LIMIT_COLUMN 2
-#define SQLITE_LIMIT_EXPR_DEPTH 3
-#define SQLITE_LIMIT_COMPOUND_SELECT 4
-#define SQLITE_LIMIT_VDBE_OP 5
-#define SQLITE_LIMIT_FUNCTION_ARG 6
-#define SQLITE_LIMIT_ATTACHED 7
-#define SQLITE_LIMIT_LIKE_PATTERN_LENGTH 8
-#define SQLITE_LIMIT_VARIABLE_NUMBER 9
-#define SQLITE_LIMIT_TRIGGER_DEPTH 10
-#define SQLITE_LIMIT_WORKER_THREADS 11
-
-/*
-** CAPI3REF: Compiling An SQL Statement
-** KEYWORDS: {SQL statement compiler}
-** METHOD: sqlite3
-** CONSTRUCTOR: sqlite3_stmt
-**
-** To execute an SQL query, it must first be compiled into a byte-code
-** program using one of these routines.
-**
-** The first argument, "db", is a [database connection] obtained from a
-** prior successful call to [sqlite3_open()], [sqlite3_open_v2()] or
-** [sqlite3_open16()]. The database connection must not have been closed.
-**
-** The second argument, "zSql", is the statement to be compiled, encoded
-** as either UTF-8 or UTF-16. The sqlite3_prepare() and sqlite3_prepare_v2()
-** interfaces use UTF-8, and sqlite3_prepare16() and sqlite3_prepare16_v2()
-** use UTF-16.
-**
-** ^If the nByte argument is negative, then zSql is read up to the
-** first zero terminator. ^If nByte is positive, then it is the
-** number of bytes read from zSql. ^If nByte is zero, then no prepared
-** statement is generated.
-** If the caller knows that the supplied string is nul-terminated, then
-** there is a small performance advantage to passing an nByte parameter that
-** is the number of bytes in the input string including
-** the nul-terminator.
-**
-** ^If pzTail is not NULL then *pzTail is made to point to the first byte
-** past the end of the first SQL statement in zSql. These routines only
-** compile the first statement in zSql, so *pzTail is left pointing to
-** what remains uncompiled.
-**
-** ^*ppStmt is left pointing to a compiled [prepared statement] that can be
-** executed using [sqlite3_step()]. ^If there is an error, *ppStmt is set
-** to NULL. ^If the input text contains no SQL (if the input is an empty
-** string or a comment) then *ppStmt is set to NULL.
-** The calling procedure is responsible for deleting the compiled
-** SQL statement using [sqlite3_finalize()] after it has finished with it.
-** ppStmt may not be NULL.
-**
-** ^On success, the sqlite3_prepare() family of routines return [SQLITE_OK];
-** otherwise an [error code] is returned.
-**
-** The sqlite3_prepare_v2() and sqlite3_prepare16_v2() interfaces are
-** recommended for all new programs. The two older interfaces are retained
-** for backwards compatibility, but their use is discouraged.
-** ^In the "v2" interfaces, the prepared statement
-** that is returned (the [sqlite3_stmt] object) contains a copy of the
-** original SQL text. This causes the [sqlite3_step()] interface to
-** behave differently in three ways:
-**
-**
-**
-** ^If the database schema changes, instead of returning [SQLITE_SCHEMA] as it
-** always used to do, [sqlite3_step()] will automatically recompile the SQL
-** statement and try to run it again. As many as [SQLITE_MAX_SCHEMA_RETRY]
-** retries will occur before sqlite3_step() gives up and returns an error.
-**
-**
-**
-** ^When an error occurs, [sqlite3_step()] will return one of the detailed
-** [error codes] or [extended error codes]. ^The legacy behavior was that
-** [sqlite3_step()] would only return a generic [SQLITE_ERROR] result code
-** and the application would have to make a second call to [sqlite3_reset()]
-** in order to find the underlying cause of the problem. With the "v2" prepare
-** interfaces, the underlying reason for the error is returned immediately.
-**
-**
-**
-** ^If the specific value bound to [parameter | host parameter] in the
-** WHERE clause might influence the choice of query plan for a statement,
-** then the statement will be automatically recompiled, as if there had been
-** a schema change, on the first [sqlite3_step()] call following any change
-** to the [sqlite3_bind_text | bindings] of that [parameter].
-** ^The specific value of WHERE-clause [parameter] might influence the
-** choice of query plan if the parameter is the left-hand side of a [LIKE]
-** or [GLOB] operator or if the parameter is compared to an indexed column
-** and the [SQLITE_ENABLE_STAT3] compile-time option is enabled.
-**
-**
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_prepare(
- sqlite3 *db, /* Database handle */
- const char *zSql, /* SQL statement, UTF-8 encoded */
- int nByte, /* Maximum length of zSql in bytes. */
- sqlite3_stmt **ppStmt, /* OUT: Statement handle */
- const char **pzTail /* OUT: Pointer to unused portion of zSql */
-);
-SQLITE_API int SQLITE_STDCALL sqlite3_prepare_v2(
- sqlite3 *db, /* Database handle */
- const char *zSql, /* SQL statement, UTF-8 encoded */
- int nByte, /* Maximum length of zSql in bytes. */
- sqlite3_stmt **ppStmt, /* OUT: Statement handle */
- const char **pzTail /* OUT: Pointer to unused portion of zSql */
-);
-SQLITE_API int SQLITE_STDCALL sqlite3_prepare16(
- sqlite3 *db, /* Database handle */
- const void *zSql, /* SQL statement, UTF-16 encoded */
- int nByte, /* Maximum length of zSql in bytes. */
- sqlite3_stmt **ppStmt, /* OUT: Statement handle */
- const void **pzTail /* OUT: Pointer to unused portion of zSql */
-);
-SQLITE_API int SQLITE_STDCALL sqlite3_prepare16_v2(
- sqlite3 *db, /* Database handle */
- const void *zSql, /* SQL statement, UTF-16 encoded */
- int nByte, /* Maximum length of zSql in bytes. */
- sqlite3_stmt **ppStmt, /* OUT: Statement handle */
- const void **pzTail /* OUT: Pointer to unused portion of zSql */
-);
-
-/*
-** CAPI3REF: Retrieving Statement SQL
-** METHOD: sqlite3_stmt
-**
-** ^The sqlite3_sql(P) interface returns a pointer to a copy of the UTF-8
-** SQL text used to create [prepared statement] P if P was
-** created by either [sqlite3_prepare_v2()] or [sqlite3_prepare16_v2()].
-** ^The sqlite3_expanded_sql(P) interface returns a pointer to a UTF-8
-** string containing the SQL text of prepared statement P with
-** [bound parameters] expanded.
-**
-** ^(For example, if a prepared statement is created using the SQL
-** text "SELECT $abc,:xyz" and if parameter $abc is bound to integer 2345
-** and parameter :xyz is unbound, then sqlite3_sql() will return
-** the original string, "SELECT $abc,:xyz" but sqlite3_expanded_sql()
-** will return "SELECT 2345,NULL".)^
-**
-** ^The sqlite3_expanded_sql() interface returns NULL if insufficient memory
-** is available to hold the result, or if the result would exceed the
-** the maximum string length determined by the [SQLITE_LIMIT_LENGTH].
-**
-** ^The [SQLITE_TRACE_SIZE_LIMIT] compile-time option limits the size of
-** bound parameter expansions. ^The [SQLITE_OMIT_TRACE] compile-time
-** option causes sqlite3_expanded_sql() to always return NULL.
-**
-** ^The string returned by sqlite3_sql(P) is managed by SQLite and is
-** automatically freed when the prepared statement is finalized.
-** ^The string returned by sqlite3_expanded_sql(P), on the other hand,
-** is obtained from [sqlite3_malloc()] and must be free by the application
-** by passing it to [sqlite3_free()].
-*/
-SQLITE_API const char *SQLITE_STDCALL sqlite3_sql(sqlite3_stmt *pStmt);
-SQLITE_API char *SQLITE_STDCALL sqlite3_expanded_sql(sqlite3_stmt *pStmt);
-
-/*
-** CAPI3REF: Determine If An SQL Statement Writes The Database
-** METHOD: sqlite3_stmt
-**
-** ^The sqlite3_stmt_readonly(X) interface returns true (non-zero) if
-** and only if the [prepared statement] X makes no direct changes to
-** the content of the database file.
-**
-** Note that [application-defined SQL functions] or
-** [virtual tables] might change the database indirectly as a side effect.
-** ^(For example, if an application defines a function "eval()" that
-** calls [sqlite3_exec()], then the following SQL statement would
-** change the database file through side-effects:
-**
-**
-** SELECT eval('DELETE FROM t1') FROM t2;
-**
-**
-** But because the [SELECT] statement does not change the database file
-** directly, sqlite3_stmt_readonly() would still return true.)^
-**
-** ^Transaction control statements such as [BEGIN], [COMMIT], [ROLLBACK],
-** [SAVEPOINT], and [RELEASE] cause sqlite3_stmt_readonly() to return true,
-** since the statements themselves do not actually modify the database but
-** rather they control the timing of when other statements modify the
-** database. ^The [ATTACH] and [DETACH] statements also cause
-** sqlite3_stmt_readonly() to return true since, while those statements
-** change the configuration of a database connection, they do not make
-** changes to the content of the database files on disk.
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_stmt_readonly(sqlite3_stmt *pStmt);
-
-/*
-** CAPI3REF: Determine If A Prepared Statement Has Been Reset
-** METHOD: sqlite3_stmt
-**
-** ^The sqlite3_stmt_busy(S) interface returns true (non-zero) if the
-** [prepared statement] S has been stepped at least once using
-** [sqlite3_step(S)] but has neither run to completion (returned
-** [SQLITE_DONE] from [sqlite3_step(S)]) nor
-** been reset using [sqlite3_reset(S)]. ^The sqlite3_stmt_busy(S)
-** interface returns false if S is a NULL pointer. If S is not a
-** NULL pointer and is not a pointer to a valid [prepared statement]
-** object, then the behavior is undefined and probably undesirable.
-**
-** This interface can be used in combination [sqlite3_next_stmt()]
-** to locate all prepared statements associated with a database
-** connection that are in need of being reset. This can be used,
-** for example, in diagnostic routines to search for prepared
-** statements that are holding a transaction open.
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_stmt_busy(sqlite3_stmt*);
-
-/*
-** CAPI3REF: Dynamically Typed Value Object
-** KEYWORDS: {protected sqlite3_value} {unprotected sqlite3_value}
-**
-** SQLite uses the sqlite3_value object to represent all values
-** that can be stored in a database table. SQLite uses dynamic typing
-** for the values it stores. ^Values stored in sqlite3_value objects
-** can be integers, floating point values, strings, BLOBs, or NULL.
-**
-** An sqlite3_value object may be either "protected" or "unprotected".
-** Some interfaces require a protected sqlite3_value. Other interfaces
-** will accept either a protected or an unprotected sqlite3_value.
-** Every interface that accepts sqlite3_value arguments specifies
-** whether or not it requires a protected sqlite3_value. The
-** [sqlite3_value_dup()] interface can be used to construct a new
-** protected sqlite3_value from an unprotected sqlite3_value.
-**
-** The terms "protected" and "unprotected" refer to whether or not
-** a mutex is held. An internal mutex is held for a protected
-** sqlite3_value object but no mutex is held for an unprotected
-** sqlite3_value object. If SQLite is compiled to be single-threaded
-** (with [SQLITE_THREADSAFE=0] and with [sqlite3_threadsafe()] returning 0)
-** or if SQLite is run in one of reduced mutex modes
-** [SQLITE_CONFIG_SINGLETHREAD] or [SQLITE_CONFIG_MULTITHREAD]
-** then there is no distinction between protected and unprotected
-** sqlite3_value objects and they can be used interchangeably. However,
-** for maximum code portability it is recommended that applications
-** still make the distinction between protected and unprotected
-** sqlite3_value objects even when not strictly required.
-**
-** ^The sqlite3_value objects that are passed as parameters into the
-** implementation of [application-defined SQL functions] are protected.
-** ^The sqlite3_value object returned by
-** [sqlite3_column_value()] is unprotected.
-** Unprotected sqlite3_value objects may only be used with
-** [sqlite3_result_value()] and [sqlite3_bind_value()].
-** The [sqlite3_value_blob | sqlite3_value_type()] family of
-** interfaces require protected sqlite3_value objects.
-*/
-typedef struct Mem sqlite3_value;
-
-/*
-** CAPI3REF: SQL Function Context Object
-**
-** The context in which an SQL function executes is stored in an
-** sqlite3_context object. ^A pointer to an sqlite3_context object
-** is always first parameter to [application-defined SQL functions].
-** The application-defined SQL function implementation will pass this
-** pointer through into calls to [sqlite3_result_int | sqlite3_result()],
-** [sqlite3_aggregate_context()], [sqlite3_user_data()],
-** [sqlite3_context_db_handle()], [sqlite3_get_auxdata()],
-** and/or [sqlite3_set_auxdata()].
-*/
-typedef struct sqlite3_context sqlite3_context;
-
-/*
-** CAPI3REF: Binding Values To Prepared Statements
-** KEYWORDS: {host parameter} {host parameters} {host parameter name}
-** KEYWORDS: {SQL parameter} {SQL parameters} {parameter binding}
-** METHOD: sqlite3_stmt
-**
-** ^(In the SQL statement text input to [sqlite3_prepare_v2()] and its variants,
-** literals may be replaced by a [parameter] that matches one of following
-** templates:
-**
-**
-** ?
-** ?NNN
-** :VVV
-** @VVV
-** $VVV
-**
-**
-** In the templates above, NNN represents an integer literal,
-** and VVV represents an alphanumeric identifier.)^ ^The values of these
-** parameters (also called "host parameter names" or "SQL parameters")
-** can be set using the sqlite3_bind_*() routines defined here.
-**
-** ^The first argument to the sqlite3_bind_*() routines is always
-** a pointer to the [sqlite3_stmt] object returned from
-** [sqlite3_prepare_v2()] or its variants.
-**
-** ^The second argument is the index of the SQL parameter to be set.
-** ^The leftmost SQL parameter has an index of 1. ^When the same named
-** SQL parameter is used more than once, second and subsequent
-** occurrences have the same index as the first occurrence.
-** ^The index for named parameters can be looked up using the
-** [sqlite3_bind_parameter_index()] API if desired. ^The index
-** for "?NNN" parameters is the value of NNN.
-** ^The NNN value must be between 1 and the [sqlite3_limit()]
-** parameter [SQLITE_LIMIT_VARIABLE_NUMBER] (default value: 999).
-**
-** ^The third argument is the value to bind to the parameter.
-** ^If the third parameter to sqlite3_bind_text() or sqlite3_bind_text16()
-** or sqlite3_bind_blob() is a NULL pointer then the fourth parameter
-** is ignored and the end result is the same as sqlite3_bind_null().
-**
-** ^(In those routines that have a fourth argument, its value is the
-** number of bytes in the parameter. To be clear: the value is the
-** number of bytes in the value, not the number of characters.)^
-** ^If the fourth parameter to sqlite3_bind_text() or sqlite3_bind_text16()
-** is negative, then the length of the string is
-** the number of bytes up to the first zero terminator.
-** If the fourth parameter to sqlite3_bind_blob() is negative, then
-** the behavior is undefined.
-** If a non-negative fourth parameter is provided to sqlite3_bind_text()
-** or sqlite3_bind_text16() or sqlite3_bind_text64() then
-** that parameter must be the byte offset
-** where the NUL terminator would occur assuming the string were NUL
-** terminated. If any NUL characters occur at byte offsets less than
-** the value of the fourth parameter then the resulting string value will
-** contain embedded NULs. The result of expressions involving strings
-** with embedded NULs is undefined.
-**
-** ^The fifth argument to the BLOB and string binding interfaces
-** is a destructor used to dispose of the BLOB or
-** string after SQLite has finished with it. ^The destructor is called
-** to dispose of the BLOB or string even if the call to bind API fails.
-** ^If the fifth argument is
-** the special value [SQLITE_STATIC], then SQLite assumes that the
-** information is in static, unmanaged space and does not need to be freed.
-** ^If the fifth argument has the value [SQLITE_TRANSIENT], then
-** SQLite makes its own private copy of the data immediately, before
-** the sqlite3_bind_*() routine returns.
-**
-** ^The sixth argument to sqlite3_bind_text64() must be one of
-** [SQLITE_UTF8], [SQLITE_UTF16], [SQLITE_UTF16BE], or [SQLITE_UTF16LE]
-** to specify the encoding of the text in the third parameter. If
-** the sixth argument to sqlite3_bind_text64() is not one of the
-** allowed values shown above, or if the text encoding is different
-** from the encoding specified by the sixth parameter, then the behavior
-** is undefined.
-**
-** ^The sqlite3_bind_zeroblob() routine binds a BLOB of length N that
-** is filled with zeroes. ^A zeroblob uses a fixed amount of memory
-** (just an integer to hold its size) while it is being processed.
-** Zeroblobs are intended to serve as placeholders for BLOBs whose
-** content is later written using
-** [sqlite3_blob_open | incremental BLOB I/O] routines.
-** ^A negative value for the zeroblob results in a zero-length BLOB.
-**
-** ^If any of the sqlite3_bind_*() routines are called with a NULL pointer
-** for the [prepared statement] or with a prepared statement for which
-** [sqlite3_step()] has been called more recently than [sqlite3_reset()],
-** then the call will return [SQLITE_MISUSE]. If any sqlite3_bind_()
-** routine is passed a [prepared statement] that has been finalized, the
-** result is undefined and probably harmful.
-**
-** ^Bindings are not cleared by the [sqlite3_reset()] routine.
-** ^Unbound parameters are interpreted as NULL.
-**
-** ^The sqlite3_bind_* routines return [SQLITE_OK] on success or an
-** [error code] if anything goes wrong.
-** ^[SQLITE_TOOBIG] might be returned if the size of a string or BLOB
-** exceeds limits imposed by [sqlite3_limit]([SQLITE_LIMIT_LENGTH]) or
-** [SQLITE_MAX_LENGTH].
-** ^[SQLITE_RANGE] is returned if the parameter
-** index is out of range. ^[SQLITE_NOMEM] is returned if malloc() fails.
-**
-** See also: [sqlite3_bind_parameter_count()],
-** [sqlite3_bind_parameter_name()], and [sqlite3_bind_parameter_index()].
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_blob(sqlite3_stmt*, int, const void*, int n, void(*)(void*));
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_blob64(sqlite3_stmt*, int, const void*, sqlite3_uint64,
- void(*)(void*));
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_double(sqlite3_stmt*, int, double);
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_int(sqlite3_stmt*, int, int);
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_int64(sqlite3_stmt*, int, sqlite3_int64);
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_null(sqlite3_stmt*, int);
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_text(sqlite3_stmt*,int,const char*,int,void(*)(void*));
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_text16(sqlite3_stmt*, int, const void*, int, void(*)(void*));
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_text64(sqlite3_stmt*, int, const char*, sqlite3_uint64,
- void(*)(void*), unsigned char encoding);
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_value(sqlite3_stmt*, int, const sqlite3_value*);
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_zeroblob(sqlite3_stmt*, int, int n);
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_zeroblob64(sqlite3_stmt*, int, sqlite3_uint64);
-
-/*
-** CAPI3REF: Number Of SQL Parameters
-** METHOD: sqlite3_stmt
-**
-** ^This routine can be used to find the number of [SQL parameters]
-** in a [prepared statement]. SQL parameters are tokens of the
-** form "?", "?NNN", ":AAA", "$AAA", or "@AAA" that serve as
-** placeholders for values that are [sqlite3_bind_blob | bound]
-** to the parameters at a later time.
-**
-** ^(This routine actually returns the index of the largest (rightmost)
-** parameter. For all forms except ?NNN, this will correspond to the
-** number of unique parameters. If parameters of the ?NNN form are used,
-** there may be gaps in the list.)^
-**
-** See also: [sqlite3_bind_blob|sqlite3_bind()],
-** [sqlite3_bind_parameter_name()], and
-** [sqlite3_bind_parameter_index()].
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_parameter_count(sqlite3_stmt*);
-
-/*
-** CAPI3REF: Name Of A Host Parameter
-** METHOD: sqlite3_stmt
-**
-** ^The sqlite3_bind_parameter_name(P,N) interface returns
-** the name of the N-th [SQL parameter] in the [prepared statement] P.
-** ^(SQL parameters of the form "?NNN" or ":AAA" or "@AAA" or "$AAA"
-** have a name which is the string "?NNN" or ":AAA" or "@AAA" or "$AAA"
-** respectively.
-** In other words, the initial ":" or "$" or "@" or "?"
-** is included as part of the name.)^
-** ^Parameters of the form "?" without a following integer have no name
-** and are referred to as "nameless" or "anonymous parameters".
-**
-** ^The first host parameter has an index of 1, not 0.
-**
-** ^If the value N is out of range or if the N-th parameter is
-** nameless, then NULL is returned. ^The returned string is
-** always in UTF-8 encoding even if the named parameter was
-** originally specified as UTF-16 in [sqlite3_prepare16()] or
-** [sqlite3_prepare16_v2()].
-**
-** See also: [sqlite3_bind_blob|sqlite3_bind()],
-** [sqlite3_bind_parameter_count()], and
-** [sqlite3_bind_parameter_index()].
-*/
-SQLITE_API const char *SQLITE_STDCALL sqlite3_bind_parameter_name(sqlite3_stmt*, int);
-
-/*
-** CAPI3REF: Index Of A Parameter With A Given Name
-** METHOD: sqlite3_stmt
-**
-** ^Return the index of an SQL parameter given its name. ^The
-** index value returned is suitable for use as the second
-** parameter to [sqlite3_bind_blob|sqlite3_bind()]. ^A zero
-** is returned if no matching parameter is found. ^The parameter
-** name must be given in UTF-8 even if the original statement
-** was prepared from UTF-16 text using [sqlite3_prepare16_v2()].
-**
-** See also: [sqlite3_bind_blob|sqlite3_bind()],
-** [sqlite3_bind_parameter_count()], and
-** [sqlite3_bind_parameter_name()].
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_bind_parameter_index(sqlite3_stmt*, const char *zName);
-
-/*
-** CAPI3REF: Reset All Bindings On A Prepared Statement
-** METHOD: sqlite3_stmt
-**
-** ^Contrary to the intuition of many, [sqlite3_reset()] does not reset
-** the [sqlite3_bind_blob | bindings] on a [prepared statement].
-** ^Use this routine to reset all host parameters to NULL.
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_clear_bindings(sqlite3_stmt*);
-
-/*
-** CAPI3REF: Number Of Columns In A Result Set
-** METHOD: sqlite3_stmt
-**
-** ^Return the number of columns in the result set returned by the
-** [prepared statement]. ^This routine returns 0 if pStmt is an SQL
-** statement that does not return data (for example an [UPDATE]).
-**
-** See also: [sqlite3_data_count()]
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_column_count(sqlite3_stmt *pStmt);
-
-/*
-** CAPI3REF: Column Names In A Result Set
-** METHOD: sqlite3_stmt
-**
-** ^These routines return the name assigned to a particular column
-** in the result set of a [SELECT] statement. ^The sqlite3_column_name()
-** interface returns a pointer to a zero-terminated UTF-8 string
-** and sqlite3_column_name16() returns a pointer to a zero-terminated
-** UTF-16 string. ^The first parameter is the [prepared statement]
-** that implements the [SELECT] statement. ^The second parameter is the
-** column number. ^The leftmost column is number 0.
-**
-** ^The returned string pointer is valid until either the [prepared statement]
-** is destroyed by [sqlite3_finalize()] or until the statement is automatically
-** reprepared by the first call to [sqlite3_step()] for a particular run
-** or until the next call to
-** sqlite3_column_name() or sqlite3_column_name16() on the same column.
-**
-** ^If sqlite3_malloc() fails during the processing of either routine
-** (for example during a conversion from UTF-8 to UTF-16) then a
-** NULL pointer is returned.
-**
-** ^The name of a result column is the value of the "AS" clause for
-** that column, if there is an AS clause. If there is no AS clause
-** then the name of the column is unspecified and may change from
-** one release of SQLite to the next.
-*/
-SQLITE_API const char *SQLITE_STDCALL sqlite3_column_name(sqlite3_stmt*, int N);
-SQLITE_API const void *SQLITE_STDCALL sqlite3_column_name16(sqlite3_stmt*, int N);
-
-/*
-** CAPI3REF: Source Of Data In A Query Result
-** METHOD: sqlite3_stmt
-**
-** ^These routines provide a means to determine the database, table, and
-** table column that is the origin of a particular result column in
-** [SELECT] statement.
-** ^The name of the database or table or column can be returned as
-** either a UTF-8 or UTF-16 string. ^The _database_ routines return
-** the database name, the _table_ routines return the table name, and
-** the origin_ routines return the column name.
-** ^The returned string is valid until the [prepared statement] is destroyed
-** using [sqlite3_finalize()] or until the statement is automatically
-** reprepared by the first call to [sqlite3_step()] for a particular run
-** or until the same information is requested
-** again in a different encoding.
-**
-** ^The names returned are the original un-aliased names of the
-** database, table, and column.
-**
-** ^The first argument to these interfaces is a [prepared statement].
-** ^These functions return information about the Nth result column returned by
-** the statement, where N is the second function argument.
-** ^The left-most column is column 0 for these routines.
-**
-** ^If the Nth column returned by the statement is an expression or
-** subquery and is not a column value, then all of these functions return
-** NULL. ^These routine might also return NULL if a memory allocation error
-** occurs. ^Otherwise, they return the name of the attached database, table,
-** or column that query result column was extracted from.
-**
-** ^As with all other SQLite APIs, those whose names end with "16" return
-** UTF-16 encoded strings and the other functions return UTF-8.
-**
-** ^These APIs are only available if the library was compiled with the
-** [SQLITE_ENABLE_COLUMN_METADATA] C-preprocessor symbol.
-**
-** If two or more threads call one or more of these routines against the same
-** prepared statement and column at the same time then the results are
-** undefined.
-**
-** If two or more threads call one or more
-** [sqlite3_column_database_name | column metadata interfaces]
-** for the same [prepared statement] and result column
-** at the same time then the results are undefined.
-*/
-SQLITE_API const char *SQLITE_STDCALL sqlite3_column_database_name(sqlite3_stmt*,int);
-SQLITE_API const void *SQLITE_STDCALL sqlite3_column_database_name16(sqlite3_stmt*,int);
-SQLITE_API const char *SQLITE_STDCALL sqlite3_column_table_name(sqlite3_stmt*,int);
-SQLITE_API const void *SQLITE_STDCALL sqlite3_column_table_name16(sqlite3_stmt*,int);
-SQLITE_API const char *SQLITE_STDCALL sqlite3_column_origin_name(sqlite3_stmt*,int);
-SQLITE_API const void *SQLITE_STDCALL sqlite3_column_origin_name16(sqlite3_stmt*,int);
-
-/*
-** CAPI3REF: Declared Datatype Of A Query Result
-** METHOD: sqlite3_stmt
-**
-** ^(The first parameter is a [prepared statement].
-** If this statement is a [SELECT] statement and the Nth column of the
-** returned result set of that [SELECT] is a table column (not an
-** expression or subquery) then the declared type of the table
-** column is returned.)^ ^If the Nth column of the result set is an
-** expression or subquery, then a NULL pointer is returned.
-** ^The returned string is always UTF-8 encoded.
-**
-** ^(For example, given the database schema:
-**
-** CREATE TABLE t1(c1 VARIANT);
-**
-** and the following statement to be compiled:
-**
-** SELECT c1 + 1, c1 FROM t1;
-**
-** this routine would return the string "VARIANT" for the second result
-** column (i==1), and a NULL pointer for the first result column (i==0).)^
-**
-** ^SQLite uses dynamic run-time typing. ^So just because a column
-** is declared to contain a particular type does not mean that the
-** data stored in that column is of the declared type. SQLite is
-** strongly typed, but the typing is dynamic not static. ^Type
-** is associated with individual values, not with the containers
-** used to hold those values.
-*/
-SQLITE_API const char *SQLITE_STDCALL sqlite3_column_decltype(sqlite3_stmt*,int);
-SQLITE_API const void *SQLITE_STDCALL sqlite3_column_decltype16(sqlite3_stmt*,int);
-
-/*
-** CAPI3REF: Evaluate An SQL Statement
-** METHOD: sqlite3_stmt
-**
-** After a [prepared statement] has been prepared using either
-** [sqlite3_prepare_v2()] or [sqlite3_prepare16_v2()] or one of the legacy
-** interfaces [sqlite3_prepare()] or [sqlite3_prepare16()], this function
-** must be called one or more times to evaluate the statement.
-**
-** The details of the behavior of the sqlite3_step() interface depend
-** on whether the statement was prepared using the newer "v2" interface
-** [sqlite3_prepare_v2()] and [sqlite3_prepare16_v2()] or the older legacy
-** interface [sqlite3_prepare()] and [sqlite3_prepare16()]. The use of the
-** new "v2" interface is recommended for new applications but the legacy
-** interface will continue to be supported.
-**
-** ^In the legacy interface, the return value will be either [SQLITE_BUSY],
-** [SQLITE_DONE], [SQLITE_ROW], [SQLITE_ERROR], or [SQLITE_MISUSE].
-** ^With the "v2" interface, any of the other [result codes] or
-** [extended result codes] might be returned as well.
-**
-** ^[SQLITE_BUSY] means that the database engine was unable to acquire the
-** database locks it needs to do its job. ^If the statement is a [COMMIT]
-** or occurs outside of an explicit transaction, then you can retry the
-** statement. If the statement is not a [COMMIT] and occurs within an
-** explicit transaction then you should rollback the transaction before
-** continuing.
-**
-** ^[SQLITE_DONE] means that the statement has finished executing
-** successfully. sqlite3_step() should not be called again on this virtual
-** machine without first calling [sqlite3_reset()] to reset the virtual
-** machine back to its initial state.
-**
-** ^If the SQL statement being executed returns any data, then [SQLITE_ROW]
-** is returned each time a new row of data is ready for processing by the
-** caller. The values may be accessed using the [column access functions].
-** sqlite3_step() is called again to retrieve the next row of data.
-**
-** ^[SQLITE_ERROR] means that a run-time error (such as a constraint
-** violation) has occurred. sqlite3_step() should not be called again on
-** the VM. More information may be found by calling [sqlite3_errmsg()].
-** ^With the legacy interface, a more specific error code (for example,
-** [SQLITE_INTERRUPT], [SQLITE_SCHEMA], [SQLITE_CORRUPT], and so forth)
-** can be obtained by calling [sqlite3_reset()] on the
-** [prepared statement]. ^In the "v2" interface,
-** the more specific error code is returned directly by sqlite3_step().
-**
-** [SQLITE_MISUSE] means that the this routine was called inappropriately.
-** Perhaps it was called on a [prepared statement] that has
-** already been [sqlite3_finalize | finalized] or on one that had
-** previously returned [SQLITE_ERROR] or [SQLITE_DONE]. Or it could
-** be the case that the same database connection is being used by two or
-** more threads at the same moment in time.
-**
-** For all versions of SQLite up to and including 3.6.23.1, a call to
-** [sqlite3_reset()] was required after sqlite3_step() returned anything
-** other than [SQLITE_ROW] before any subsequent invocation of
-** sqlite3_step(). Failure to reset the prepared statement using
-** [sqlite3_reset()] would result in an [SQLITE_MISUSE] return from
-** sqlite3_step(). But after version 3.6.23.1, sqlite3_step() began
-** calling [sqlite3_reset()] automatically in this circumstance rather
-** than returning [SQLITE_MISUSE]. This is not considered a compatibility
-** break because any application that ever receives an SQLITE_MISUSE error
-** is broken by definition. The [SQLITE_OMIT_AUTORESET] compile-time option
-** can be used to restore the legacy behavior.
-**
-** Goofy Interface Alert: In the legacy interface, the sqlite3_step()
-** API always returns a generic error code, [SQLITE_ERROR], following any
-** error other than [SQLITE_BUSY] and [SQLITE_MISUSE]. You must call
-** [sqlite3_reset()] or [sqlite3_finalize()] in order to find one of the
-** specific [error codes] that better describes the error.
-** We admit that this is a goofy design. The problem has been fixed
-** with the "v2" interface. If you prepare all of your SQL statements
-** using either [sqlite3_prepare_v2()] or [sqlite3_prepare16_v2()] instead
-** of the legacy [sqlite3_prepare()] and [sqlite3_prepare16()] interfaces,
-** then the more specific [error codes] are returned directly
-** by sqlite3_step(). The use of the "v2" interface is recommended.
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_step(sqlite3_stmt*);
-
-/*
-** CAPI3REF: Number of columns in a result set
-** METHOD: sqlite3_stmt
-**
-** ^The sqlite3_data_count(P) interface returns the number of columns in the
-** current row of the result set of [prepared statement] P.
-** ^If prepared statement P does not have results ready to return
-** (via calls to the [sqlite3_column_int | sqlite3_column_*()] of
-** interfaces) then sqlite3_data_count(P) returns 0.
-** ^The sqlite3_data_count(P) routine also returns 0 if P is a NULL pointer.
-** ^The sqlite3_data_count(P) routine returns 0 if the previous call to
-** [sqlite3_step](P) returned [SQLITE_DONE]. ^The sqlite3_data_count(P)
-** will return non-zero if previous call to [sqlite3_step](P) returned
-** [SQLITE_ROW], except in the case of the [PRAGMA incremental_vacuum]
-** where it always returns zero since each step of that multi-step
-** pragma returns 0 columns of data.
-**
-** See also: [sqlite3_column_count()]
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_data_count(sqlite3_stmt *pStmt);
-
-/*
-** CAPI3REF: Fundamental Datatypes
-** KEYWORDS: SQLITE_TEXT
-**
-** ^(Every value in SQLite has one of five fundamental datatypes:
-**
-**
-** 64-bit signed integer
-** 64-bit IEEE floating point number
-** string
-** BLOB
-** NULL
-** )^
-**
-** These constants are codes for each of those types.
-**
-** Note that the SQLITE_TEXT constant was also used in SQLite version 2
-** for a completely different meaning. Software that links against both
-** SQLite version 2 and SQLite version 3 should use SQLITE3_TEXT, not
-** SQLITE_TEXT.
-*/
-#define SQLITE_INTEGER 1
-#define SQLITE_FLOAT 2
-#define SQLITE_BLOB 4
-#define SQLITE_NULL 5
-#ifdef SQLITE_TEXT
-# undef SQLITE_TEXT
-#else
-# define SQLITE_TEXT 3
-#endif
-#define SQLITE3_TEXT 3
-
-/*
-** CAPI3REF: Result Values From A Query
-** KEYWORDS: {column access functions}
-** METHOD: sqlite3_stmt
-**
-** ^These routines return information about a single column of the current
-** result row of a query. ^In every case the first argument is a pointer
-** to the [prepared statement] that is being evaluated (the [sqlite3_stmt*]
-** that was returned from [sqlite3_prepare_v2()] or one of its variants)
-** and the second argument is the index of the column for which information
-** should be returned. ^The leftmost column of the result set has the index 0.
-** ^The number of columns in the result can be determined using
-** [sqlite3_column_count()].
-**
-** If the SQL statement does not currently point to a valid row, or if the
-** column index is out of range, the result is undefined.
-** These routines may only be called when the most recent call to
-** [sqlite3_step()] has returned [SQLITE_ROW] and neither
-** [sqlite3_reset()] nor [sqlite3_finalize()] have been called subsequently.
-** If any of these routines are called after [sqlite3_reset()] or
-** [sqlite3_finalize()] or after [sqlite3_step()] has returned
-** something other than [SQLITE_ROW], the results are undefined.
-** If [sqlite3_step()] or [sqlite3_reset()] or [sqlite3_finalize()]
-** are called from a different thread while any of these routines
-** are pending, then the results are undefined.
-**
-** ^The sqlite3_column_type() routine returns the
-** [SQLITE_INTEGER | datatype code] for the initial data type
-** of the result column. ^The returned value is one of [SQLITE_INTEGER],
-** [SQLITE_FLOAT], [SQLITE_TEXT], [SQLITE_BLOB], or [SQLITE_NULL]. The value
-** returned by sqlite3_column_type() is only meaningful if no type
-** conversions have occurred as described below. After a type conversion,
-** the value returned by sqlite3_column_type() is undefined. Future
-** versions of SQLite may change the behavior of sqlite3_column_type()
-** following a type conversion.
-**
-** ^If the result is a BLOB or UTF-8 string then the sqlite3_column_bytes()
-** routine returns the number of bytes in that BLOB or string.
-** ^If the result is a UTF-16 string, then sqlite3_column_bytes() converts
-** the string to UTF-8 and then returns the number of bytes.
-** ^If the result is a numeric value then sqlite3_column_bytes() uses
-** [sqlite3_snprintf()] to convert that value to a UTF-8 string and returns
-** the number of bytes in that string.
-** ^If the result is NULL, then sqlite3_column_bytes() returns zero.
-**
-** ^If the result is a BLOB or UTF-16 string then the sqlite3_column_bytes16()
-** routine returns the number of bytes in that BLOB or string.
-** ^If the result is a UTF-8 string, then sqlite3_column_bytes16() converts
-** the string to UTF-16 and then returns the number of bytes.
-** ^If the result is a numeric value then sqlite3_column_bytes16() uses
-** [sqlite3_snprintf()] to convert that value to a UTF-16 string and returns
-** the number of bytes in that string.
-** ^If the result is NULL, then sqlite3_column_bytes16() returns zero.
-**
-** ^The values returned by [sqlite3_column_bytes()] and
-** [sqlite3_column_bytes16()] do not include the zero terminators at the end
-** of the string. ^For clarity: the values returned by
-** [sqlite3_column_bytes()] and [sqlite3_column_bytes16()] are the number of
-** bytes in the string, not the number of characters.
-**
-** ^Strings returned by sqlite3_column_text() and sqlite3_column_text16(),
-** even empty strings, are always zero-terminated. ^The return
-** value from sqlite3_column_blob() for a zero-length BLOB is a NULL pointer.
-**
-** Warning: ^The object returned by [sqlite3_column_value()] is an
-** [unprotected sqlite3_value] object. In a multithreaded environment,
-** an unprotected sqlite3_value object may only be used safely with
-** [sqlite3_bind_value()] and [sqlite3_result_value()].
-** If the [unprotected sqlite3_value] object returned by
-** [sqlite3_column_value()] is used in any other way, including calls
-** to routines like [sqlite3_value_int()], [sqlite3_value_text()],
-** or [sqlite3_value_bytes()], the behavior is not threadsafe.
-**
-** These routines attempt to convert the value where appropriate. ^For
-** example, if the internal representation is FLOAT and a text result
-** is requested, [sqlite3_snprintf()] is used internally to perform the
-** conversion automatically. ^(The following table details the conversions
-** that are applied:
-**
-**
-**
-** Internal Type Requested Type Conversion
-**
-** NULL INTEGER Result is 0
-** NULL FLOAT Result is 0.0
-** NULL TEXT Result is a NULL pointer
-** NULL BLOB Result is a NULL pointer
-** INTEGER FLOAT Convert from integer to float
-** INTEGER TEXT ASCII rendering of the integer
-** INTEGER BLOB Same as INTEGER->TEXT
-** FLOAT INTEGER [CAST] to INTEGER
-** FLOAT TEXT ASCII rendering of the float
-** FLOAT BLOB [CAST] to BLOB
-** TEXT INTEGER [CAST] to INTEGER
-** TEXT FLOAT [CAST] to REAL
-** TEXT BLOB No change
-** BLOB INTEGER [CAST] to INTEGER
-** BLOB FLOAT [CAST] to REAL
-** BLOB TEXT Add a zero terminator if needed
-**
-** )^
-**
-** Note that when type conversions occur, pointers returned by prior
-** calls to sqlite3_column_blob(), sqlite3_column_text(), and/or
-** sqlite3_column_text16() may be invalidated.
-** Type conversions and pointer invalidations might occur
-** in the following cases:
-**
-**
-** The initial content is a BLOB and sqlite3_column_text() or
-** sqlite3_column_text16() is called. A zero-terminator might
-** need to be added to the string.
-** The initial content is UTF-8 text and sqlite3_column_bytes16() or
-** sqlite3_column_text16() is called. The content must be converted
-** to UTF-16.
-** The initial content is UTF-16 text and sqlite3_column_bytes() or
-** sqlite3_column_text() is called. The content must be converted
-** to UTF-8.
-**
-**
-** ^Conversions between UTF-16be and UTF-16le are always done in place and do
-** not invalidate a prior pointer, though of course the content of the buffer
-** that the prior pointer references will have been modified. Other kinds
-** of conversion are done in place when it is possible, but sometimes they
-** are not possible and in those cases prior pointers are invalidated.
-**
-** The safest policy is to invoke these routines
-** in one of the following ways:
-**
-**
-** sqlite3_column_text() followed by sqlite3_column_bytes()
-** sqlite3_column_blob() followed by sqlite3_column_bytes()
-** sqlite3_column_text16() followed by sqlite3_column_bytes16()
-**
-**
-** In other words, you should call sqlite3_column_text(),
-** sqlite3_column_blob(), or sqlite3_column_text16() first to force the result
-** into the desired format, then invoke sqlite3_column_bytes() or
-** sqlite3_column_bytes16() to find the size of the result. Do not mix calls
-** to sqlite3_column_text() or sqlite3_column_blob() with calls to
-** sqlite3_column_bytes16(), and do not mix calls to sqlite3_column_text16()
-** with calls to sqlite3_column_bytes().
-**
-** ^The pointers returned are valid until a type conversion occurs as
-** described above, or until [sqlite3_step()] or [sqlite3_reset()] or
-** [sqlite3_finalize()] is called. ^The memory space used to hold strings
-** and BLOBs is freed automatically. Do not pass the pointers returned
-** from [sqlite3_column_blob()], [sqlite3_column_text()], etc. into
-** [sqlite3_free()].
-**
-** ^(If a memory allocation error occurs during the evaluation of any
-** of these routines, a default value is returned. The default value
-** is either the integer 0, the floating point number 0.0, or a NULL
-** pointer. Subsequent calls to [sqlite3_errcode()] will return
-** [SQLITE_NOMEM].)^
-*/
-SQLITE_API const void *SQLITE_STDCALL sqlite3_column_blob(sqlite3_stmt*, int iCol);
-SQLITE_API int SQLITE_STDCALL sqlite3_column_bytes(sqlite3_stmt*, int iCol);
-SQLITE_API int SQLITE_STDCALL sqlite3_column_bytes16(sqlite3_stmt*, int iCol);
-SQLITE_API double SQLITE_STDCALL sqlite3_column_double(sqlite3_stmt*, int iCol);
-SQLITE_API int SQLITE_STDCALL sqlite3_column_int(sqlite3_stmt*, int iCol);
-SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_column_int64(sqlite3_stmt*, int iCol);
-SQLITE_API const unsigned char *SQLITE_STDCALL sqlite3_column_text(sqlite3_stmt*, int iCol);
-SQLITE_API const void *SQLITE_STDCALL sqlite3_column_text16(sqlite3_stmt*, int iCol);
-SQLITE_API int SQLITE_STDCALL sqlite3_column_type(sqlite3_stmt*, int iCol);
-SQLITE_API sqlite3_value *SQLITE_STDCALL sqlite3_column_value(sqlite3_stmt*, int iCol);
-
-/*
-** CAPI3REF: Destroy A Prepared Statement Object
-** DESTRUCTOR: sqlite3_stmt
-**
-** ^The sqlite3_finalize() function is called to delete a [prepared statement].
-** ^If the most recent evaluation of the statement encountered no errors
-** or if the statement is never been evaluated, then sqlite3_finalize() returns
-** SQLITE_OK. ^If the most recent evaluation of statement S failed, then
-** sqlite3_finalize(S) returns the appropriate [error code] or
-** [extended error code].
-**
-** ^The sqlite3_finalize(S) routine can be called at any point during
-** the life cycle of [prepared statement] S:
-** before statement S is ever evaluated, after
-** one or more calls to [sqlite3_reset()], or after any call
-** to [sqlite3_step()] regardless of whether or not the statement has
-** completed execution.
-**
-** ^Invoking sqlite3_finalize() on a NULL pointer is a harmless no-op.
-**
-** The application must finalize every [prepared statement] in order to avoid
-** resource leaks. It is a grievous error for the application to try to use
-** a prepared statement after it has been finalized. Any use of a prepared
-** statement after it has been finalized can result in undefined and
-** undesirable behavior such as segfaults and heap corruption.
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_finalize(sqlite3_stmt *pStmt);
-
-/*
-** CAPI3REF: Reset A Prepared Statement Object
-** METHOD: sqlite3_stmt
-**
-** The sqlite3_reset() function is called to reset a [prepared statement]
-** object back to its initial state, ready to be re-executed.
-** ^Any SQL statement variables that had values bound to them using
-** the [sqlite3_bind_blob | sqlite3_bind_*() API] retain their values.
-** Use [sqlite3_clear_bindings()] to reset the bindings.
-**
-** ^The [sqlite3_reset(S)] interface resets the [prepared statement] S
-** back to the beginning of its program.
-**
-** ^If the most recent call to [sqlite3_step(S)] for the
-** [prepared statement] S returned [SQLITE_ROW] or [SQLITE_DONE],
-** or if [sqlite3_step(S)] has never before been called on S,
-** then [sqlite3_reset(S)] returns [SQLITE_OK].
-**
-** ^If the most recent call to [sqlite3_step(S)] for the
-** [prepared statement] S indicated an error, then
-** [sqlite3_reset(S)] returns an appropriate [error code].
-**
-** ^The [sqlite3_reset(S)] interface does not change the values
-** of any [sqlite3_bind_blob|bindings] on the [prepared statement] S.
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_reset(sqlite3_stmt *pStmt);
-
-/*
-** CAPI3REF: Create Or Redefine SQL Functions
-** KEYWORDS: {function creation routines}
-** KEYWORDS: {application-defined SQL function}
-** KEYWORDS: {application-defined SQL functions}
-** METHOD: sqlite3
-**
-** ^These functions (collectively known as "function creation routines")
-** are used to add SQL functions or aggregates or to redefine the behavior
-** of existing SQL functions or aggregates. The only differences between
-** these routines are the text encoding expected for
-** the second parameter (the name of the function being created)
-** and the presence or absence of a destructor callback for
-** the application data pointer.
-**
-** ^The first parameter is the [database connection] to which the SQL
-** function is to be added. ^If an application uses more than one database
-** connection then application-defined SQL functions must be added
-** to each database connection separately.
-**
-** ^The second parameter is the name of the SQL function to be created or
-** redefined. ^The length of the name is limited to 255 bytes in a UTF-8
-** representation, exclusive of the zero-terminator. ^Note that the name
-** length limit is in UTF-8 bytes, not characters nor UTF-16 bytes.
-** ^Any attempt to create a function with a longer name
-** will result in [SQLITE_MISUSE] being returned.
-**
-** ^The third parameter (nArg)
-** is the number of arguments that the SQL function or
-** aggregate takes. ^If this parameter is -1, then the SQL function or
-** aggregate may take any number of arguments between 0 and the limit
-** set by [sqlite3_limit]([SQLITE_LIMIT_FUNCTION_ARG]). If the third
-** parameter is less than -1 or greater than 127 then the behavior is
-** undefined.
-**
-** ^The fourth parameter, eTextRep, specifies what
-** [SQLITE_UTF8 | text encoding] this SQL function prefers for
-** its parameters. The application should set this parameter to
-** [SQLITE_UTF16LE] if the function implementation invokes
-** [sqlite3_value_text16le()] on an input, or [SQLITE_UTF16BE] if the
-** implementation invokes [sqlite3_value_text16be()] on an input, or
-** [SQLITE_UTF16] if [sqlite3_value_text16()] is used, or [SQLITE_UTF8]
-** otherwise. ^The same SQL function may be registered multiple times using
-** different preferred text encodings, with different implementations for
-** each encoding.
-** ^When multiple implementations of the same function are available, SQLite
-** will pick the one that involves the least amount of data conversion.
-**
-** ^The fourth parameter may optionally be ORed with [SQLITE_DETERMINISTIC]
-** to signal that the function will always return the same result given
-** the same inputs within a single SQL statement. Most SQL functions are
-** deterministic. The built-in [random()] SQL function is an example of a
-** function that is not deterministic. The SQLite query planner is able to
-** perform additional optimizations on deterministic functions, so use
-** of the [SQLITE_DETERMINISTIC] flag is recommended where possible.
-**
-** ^(The fifth parameter is an arbitrary pointer. The implementation of the
-** function can gain access to this pointer using [sqlite3_user_data()].)^
-**
-** ^The sixth, seventh and eighth parameters, xFunc, xStep and xFinal, are
-** pointers to C-language functions that implement the SQL function or
-** aggregate. ^A scalar SQL function requires an implementation of the xFunc
-** callback only; NULL pointers must be passed as the xStep and xFinal
-** parameters. ^An aggregate SQL function requires an implementation of xStep
-** and xFinal and NULL pointer must be passed for xFunc. ^To delete an existing
-** SQL function or aggregate, pass NULL pointers for all three function
-** callbacks.
-**
-** ^(If the ninth parameter to sqlite3_create_function_v2() is not NULL,
-** then it is destructor for the application data pointer.
-** The destructor is invoked when the function is deleted, either by being
-** overloaded or when the database connection closes.)^
-** ^The destructor is also invoked if the call to
-** sqlite3_create_function_v2() fails.
-** ^When the destructor callback of the tenth parameter is invoked, it
-** is passed a single argument which is a copy of the application data
-** pointer which was the fifth parameter to sqlite3_create_function_v2().
-**
-** ^It is permitted to register multiple implementations of the same
-** functions with the same name but with either differing numbers of
-** arguments or differing preferred text encodings. ^SQLite will use
-** the implementation that most closely matches the way in which the
-** SQL function is used. ^A function implementation with a non-negative
-** nArg parameter is a better match than a function implementation with
-** a negative nArg. ^A function where the preferred text encoding
-** matches the database encoding is a better
-** match than a function where the encoding is different.
-** ^A function where the encoding difference is between UTF16le and UTF16be
-** is a closer match than a function where the encoding difference is
-** between UTF8 and UTF16.
-**
-** ^Built-in functions may be overloaded by new application-defined functions.
-**
-** ^An application-defined function is permitted to call other
-** SQLite interfaces. However, such calls must not
-** close the database connection nor finalize or reset the prepared
-** statement in which the function is running.
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_create_function(
- sqlite3 *db,
- const char *zFunctionName,
- int nArg,
- int eTextRep,
- void *pApp,
- void (*xFunc)(sqlite3_context*,int,sqlite3_value**),
- void (*xStep)(sqlite3_context*,int,sqlite3_value**),
- void (*xFinal)(sqlite3_context*)
-);
-SQLITE_API int SQLITE_STDCALL sqlite3_create_function16(
- sqlite3 *db,
- const void *zFunctionName,
- int nArg,
- int eTextRep,
- void *pApp,
- void (*xFunc)(sqlite3_context*,int,sqlite3_value**),
- void (*xStep)(sqlite3_context*,int,sqlite3_value**),
- void (*xFinal)(sqlite3_context*)
-);
-SQLITE_API int SQLITE_STDCALL sqlite3_create_function_v2(
- sqlite3 *db,
- const char *zFunctionName,
- int nArg,
- int eTextRep,
- void *pApp,
- void (*xFunc)(sqlite3_context*,int,sqlite3_value**),
- void (*xStep)(sqlite3_context*,int,sqlite3_value**),
- void (*xFinal)(sqlite3_context*),
- void(*xDestroy)(void*)
-);
-
-/*
-** CAPI3REF: Text Encodings
-**
-** These constant define integer codes that represent the various
-** text encodings supported by SQLite.
-*/
-#define SQLITE_UTF8 1 /* IMP: R-37514-35566 */
-#define SQLITE_UTF16LE 2 /* IMP: R-03371-37637 */
-#define SQLITE_UTF16BE 3 /* IMP: R-51971-34154 */
-#define SQLITE_UTF16 4 /* Use native byte order */
-#define SQLITE_ANY 5 /* Deprecated */
-#define SQLITE_UTF16_ALIGNED 8 /* sqlite3_create_collation only */
-
-/*
-** CAPI3REF: Function Flags
-**
-** These constants may be ORed together with the
-** [SQLITE_UTF8 | preferred text encoding] as the fourth argument
-** to [sqlite3_create_function()], [sqlite3_create_function16()], or
-** [sqlite3_create_function_v2()].
-*/
-#define SQLITE_DETERMINISTIC 0x800
-
-/*
-** CAPI3REF: Deprecated Functions
-** DEPRECATED
-**
-** These functions are [deprecated]. In order to maintain
-** backwards compatibility with older code, these functions continue
-** to be supported. However, new applications should avoid
-** the use of these functions. To encourage programmers to avoid
-** these functions, we will not explain what they do.
-*/
-#ifndef SQLITE_OMIT_DEPRECATED
-SQLITE_API SQLITE_DEPRECATED int SQLITE_STDCALL sqlite3_aggregate_count(sqlite3_context*);
-SQLITE_API SQLITE_DEPRECATED int SQLITE_STDCALL sqlite3_expired(sqlite3_stmt*);
-SQLITE_API SQLITE_DEPRECATED int SQLITE_STDCALL sqlite3_transfer_bindings(sqlite3_stmt*, sqlite3_stmt*);
-SQLITE_API SQLITE_DEPRECATED int SQLITE_STDCALL sqlite3_global_recover(void);
-SQLITE_API SQLITE_DEPRECATED void SQLITE_STDCALL sqlite3_thread_cleanup(void);
-SQLITE_API SQLITE_DEPRECATED int SQLITE_STDCALL sqlite3_memory_alarm(void(*)(void*,sqlite3_int64,int),
- void*,sqlite3_int64);
-#endif
-
-/*
-** CAPI3REF: Obtaining SQL Values
-** METHOD: sqlite3_value
-**
-** The C-language implementation of SQL functions and aggregates uses
-** this set of interface routines to access the parameter values on
-** the function or aggregate.
-**
-** The xFunc (for scalar functions) or xStep (for aggregates) parameters
-** to [sqlite3_create_function()] and [sqlite3_create_function16()]
-** define callbacks that implement the SQL functions and aggregates.
-** The 3rd parameter to these callbacks is an array of pointers to
-** [protected sqlite3_value] objects. There is one [sqlite3_value] object for
-** each parameter to the SQL function. These routines are used to
-** extract values from the [sqlite3_value] objects.
-**
-** These routines work only with [protected sqlite3_value] objects.
-** Any attempt to use these routines on an [unprotected sqlite3_value]
-** object results in undefined behavior.
-**
-** ^These routines work just like the corresponding [column access functions]
-** except that these routines take a single [protected sqlite3_value] object
-** pointer instead of a [sqlite3_stmt*] pointer and an integer column number.
-**
-** ^The sqlite3_value_text16() interface extracts a UTF-16 string
-** in the native byte-order of the host machine. ^The
-** sqlite3_value_text16be() and sqlite3_value_text16le() interfaces
-** extract UTF-16 strings as big-endian and little-endian respectively.
-**
-** ^(The sqlite3_value_numeric_type() interface attempts to apply
-** numeric affinity to the value. This means that an attempt is
-** made to convert the value to an integer or floating point. If
-** such a conversion is possible without loss of information (in other
-** words, if the value is a string that looks like a number)
-** then the conversion is performed. Otherwise no conversion occurs.
-** The [SQLITE_INTEGER | datatype] after conversion is returned.)^
-**
-** Please pay particular attention to the fact that the pointer returned
-** from [sqlite3_value_blob()], [sqlite3_value_text()], or
-** [sqlite3_value_text16()] can be invalidated by a subsequent call to
-** [sqlite3_value_bytes()], [sqlite3_value_bytes16()], [sqlite3_value_text()],
-** or [sqlite3_value_text16()].
-**
-** These routines must be called from the same thread as
-** the SQL function that supplied the [sqlite3_value*] parameters.
-*/
-SQLITE_API const void *SQLITE_STDCALL sqlite3_value_blob(sqlite3_value*);
-SQLITE_API int SQLITE_STDCALL sqlite3_value_bytes(sqlite3_value*);
-SQLITE_API int SQLITE_STDCALL sqlite3_value_bytes16(sqlite3_value*);
-SQLITE_API double SQLITE_STDCALL sqlite3_value_double(sqlite3_value*);
-SQLITE_API int SQLITE_STDCALL sqlite3_value_int(sqlite3_value*);
-SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_value_int64(sqlite3_value*);
-SQLITE_API const unsigned char *SQLITE_STDCALL sqlite3_value_text(sqlite3_value*);
-SQLITE_API const void *SQLITE_STDCALL sqlite3_value_text16(sqlite3_value*);
-SQLITE_API const void *SQLITE_STDCALL sqlite3_value_text16le(sqlite3_value*);
-SQLITE_API const void *SQLITE_STDCALL sqlite3_value_text16be(sqlite3_value*);
-SQLITE_API int SQLITE_STDCALL sqlite3_value_type(sqlite3_value*);
-SQLITE_API int SQLITE_STDCALL sqlite3_value_numeric_type(sqlite3_value*);
-
-/*
-** CAPI3REF: Finding The Subtype Of SQL Values
-** METHOD: sqlite3_value
-**
-** The sqlite3_value_subtype(V) function returns the subtype for
-** an [application-defined SQL function] argument V. The subtype
-** information can be used to pass a limited amount of context from
-** one SQL function to another. Use the [sqlite3_result_subtype()]
-** routine to set the subtype for the return value of an SQL function.
-**
-** SQLite makes no use of subtype itself. It merely passes the subtype
-** from the result of one [application-defined SQL function] into the
-** input of another.
-*/
-SQLITE_API unsigned int SQLITE_STDCALL sqlite3_value_subtype(sqlite3_value*);
-
-/*
-** CAPI3REF: Copy And Free SQL Values
-** METHOD: sqlite3_value
-**
-** ^The sqlite3_value_dup(V) interface makes a copy of the [sqlite3_value]
-** object D and returns a pointer to that copy. ^The [sqlite3_value] returned
-** is a [protected sqlite3_value] object even if the input is not.
-** ^The sqlite3_value_dup(V) interface returns NULL if V is NULL or if a
-** memory allocation fails.
-**
-** ^The sqlite3_value_free(V) interface frees an [sqlite3_value] object
-** previously obtained from [sqlite3_value_dup()]. ^If V is a NULL pointer
-** then sqlite3_value_free(V) is a harmless no-op.
-*/
-SQLITE_API sqlite3_value *SQLITE_STDCALL sqlite3_value_dup(const sqlite3_value*);
-SQLITE_API void SQLITE_STDCALL sqlite3_value_free(sqlite3_value*);
-
-/*
-** CAPI3REF: Obtain Aggregate Function Context
-** METHOD: sqlite3_context
-**
-** Implementations of aggregate SQL functions use this
-** routine to allocate memory for storing their state.
-**
-** ^The first time the sqlite3_aggregate_context(C,N) routine is called
-** for a particular aggregate function, SQLite
-** allocates N of memory, zeroes out that memory, and returns a pointer
-** to the new memory. ^On second and subsequent calls to
-** sqlite3_aggregate_context() for the same aggregate function instance,
-** the same buffer is returned. Sqlite3_aggregate_context() is normally
-** called once for each invocation of the xStep callback and then one
-** last time when the xFinal callback is invoked. ^(When no rows match
-** an aggregate query, the xStep() callback of the aggregate function
-** implementation is never called and xFinal() is called exactly once.
-** In those cases, sqlite3_aggregate_context() might be called for the
-** first time from within xFinal().)^
-**
-** ^The sqlite3_aggregate_context(C,N) routine returns a NULL pointer
-** when first called if N is less than or equal to zero or if a memory
-** allocate error occurs.
-**
-** ^(The amount of space allocated by sqlite3_aggregate_context(C,N) is
-** determined by the N parameter on first successful call. Changing the
-** value of N in subsequent call to sqlite3_aggregate_context() within
-** the same aggregate function instance will not resize the memory
-** allocation.)^ Within the xFinal callback, it is customary to set
-** N=0 in calls to sqlite3_aggregate_context(C,N) so that no
-** pointless memory allocations occur.
-**
-** ^SQLite automatically frees the memory allocated by
-** sqlite3_aggregate_context() when the aggregate query concludes.
-**
-** The first parameter must be a copy of the
-** [sqlite3_context | SQL function context] that is the first parameter
-** to the xStep or xFinal callback routine that implements the aggregate
-** function.
-**
-** This routine must be called from the same thread in which
-** the aggregate SQL function is running.
-*/
-SQLITE_API void *SQLITE_STDCALL sqlite3_aggregate_context(sqlite3_context*, int nBytes);
-
-/*
-** CAPI3REF: User Data For Functions
-** METHOD: sqlite3_context
-**
-** ^The sqlite3_user_data() interface returns a copy of
-** the pointer that was the pUserData parameter (the 5th parameter)
-** of the [sqlite3_create_function()]
-** and [sqlite3_create_function16()] routines that originally
-** registered the application defined function.
-**
-** This routine must be called from the same thread in which
-** the application-defined function is running.
-*/
-SQLITE_API void *SQLITE_STDCALL sqlite3_user_data(sqlite3_context*);
-
-/*
-** CAPI3REF: Database Connection For Functions
-** METHOD: sqlite3_context
-**
-** ^The sqlite3_context_db_handle() interface returns a copy of
-** the pointer to the [database connection] (the 1st parameter)
-** of the [sqlite3_create_function()]
-** and [sqlite3_create_function16()] routines that originally
-** registered the application defined function.
-*/
-SQLITE_API sqlite3 *SQLITE_STDCALL sqlite3_context_db_handle(sqlite3_context*);
-
-/*
-** CAPI3REF: Function Auxiliary Data
-** METHOD: sqlite3_context
-**
-** These functions may be used by (non-aggregate) SQL functions to
-** associate metadata with argument values. If the same value is passed to
-** multiple invocations of the same SQL function during query execution, under
-** some circumstances the associated metadata may be preserved. An example
-** of where this might be useful is in a regular-expression matching
-** function. The compiled version of the regular expression can be stored as
-** metadata associated with the pattern string.
-** Then as long as the pattern string remains the same,
-** the compiled regular expression can be reused on multiple
-** invocations of the same function.
-**
-** ^The sqlite3_get_auxdata() interface returns a pointer to the metadata
-** associated by the sqlite3_set_auxdata() function with the Nth argument
-** value to the application-defined function. ^If there is no metadata
-** associated with the function argument, this sqlite3_get_auxdata() interface
-** returns a NULL pointer.
-**
-** ^The sqlite3_set_auxdata(C,N,P,X) interface saves P as metadata for the N-th
-** argument of the application-defined function. ^Subsequent
-** calls to sqlite3_get_auxdata(C,N) return P from the most recent
-** sqlite3_set_auxdata(C,N,P,X) call if the metadata is still valid or
-** NULL if the metadata has been discarded.
-** ^After each call to sqlite3_set_auxdata(C,N,P,X) where X is not NULL,
-** SQLite will invoke the destructor function X with parameter P exactly
-** once, when the metadata is discarded.
-** SQLite is free to discard the metadata at any time, including:
-** ^(when the corresponding function parameter changes)^, or
-** ^(when [sqlite3_reset()] or [sqlite3_finalize()] is called for the
-** SQL statement)^, or
-** ^(when sqlite3_set_auxdata() is invoked again on the same
-** parameter)^, or
-** ^(during the original sqlite3_set_auxdata() call when a memory
-** allocation error occurs.)^
-**
-** Note the last bullet in particular. The destructor X in
-** sqlite3_set_auxdata(C,N,P,X) might be called immediately, before the
-** sqlite3_set_auxdata() interface even returns. Hence sqlite3_set_auxdata()
-** should be called near the end of the function implementation and the
-** function implementation should not make any use of P after
-** sqlite3_set_auxdata() has been called.
-**
-** ^(In practice, metadata is preserved between function calls for
-** function parameters that are compile-time constants, including literal
-** values and [parameters] and expressions composed from the same.)^
-**
-** These routines must be called from the same thread in which
-** the SQL function is running.
-*/
-SQLITE_API void *SQLITE_STDCALL sqlite3_get_auxdata(sqlite3_context*, int N);
-SQLITE_API void SQLITE_STDCALL sqlite3_set_auxdata(sqlite3_context*, int N, void*, void (*)(void*));
-
-
-/*
-** CAPI3REF: Constants Defining Special Destructor Behavior
-**
-** These are special values for the destructor that is passed in as the
-** final argument to routines like [sqlite3_result_blob()]. ^If the destructor
-** argument is SQLITE_STATIC, it means that the content pointer is constant
-** and will never change. It does not need to be destroyed. ^The
-** SQLITE_TRANSIENT value means that the content will likely change in
-** the near future and that SQLite should make its own private copy of
-** the content before returning.
-**
-** The typedef is necessary to work around problems in certain
-** C++ compilers.
-*/
-typedef void (*sqlite3_destructor_type)(void*);
-#define SQLITE_STATIC ((sqlite3_destructor_type)0)
-#define SQLITE_TRANSIENT ((sqlite3_destructor_type)-1)
-
-/*
-** CAPI3REF: Setting The Result Of An SQL Function
-** METHOD: sqlite3_context
-**
-** These routines are used by the xFunc or xFinal callbacks that
-** implement SQL functions and aggregates. See
-** [sqlite3_create_function()] and [sqlite3_create_function16()]
-** for additional information.
-**
-** These functions work very much like the [parameter binding] family of
-** functions used to bind values to host parameters in prepared statements.
-** Refer to the [SQL parameter] documentation for additional information.
-**
-** ^The sqlite3_result_blob() interface sets the result from
-** an application-defined function to be the BLOB whose content is pointed
-** to by the second parameter and which is N bytes long where N is the
-** third parameter.
-**
-** ^The sqlite3_result_zeroblob(C,N) and sqlite3_result_zeroblob64(C,N)
-** interfaces set the result of the application-defined function to be
-** a BLOB containing all zero bytes and N bytes in size.
-**
-** ^The sqlite3_result_double() interface sets the result from
-** an application-defined function to be a floating point value specified
-** by its 2nd argument.
-**
-** ^The sqlite3_result_error() and sqlite3_result_error16() functions
-** cause the implemented SQL function to throw an exception.
-** ^SQLite uses the string pointed to by the
-** 2nd parameter of sqlite3_result_error() or sqlite3_result_error16()
-** as the text of an error message. ^SQLite interprets the error
-** message string from sqlite3_result_error() as UTF-8. ^SQLite
-** interprets the string from sqlite3_result_error16() as UTF-16 in native
-** byte order. ^If the third parameter to sqlite3_result_error()
-** or sqlite3_result_error16() is negative then SQLite takes as the error
-** message all text up through the first zero character.
-** ^If the third parameter to sqlite3_result_error() or
-** sqlite3_result_error16() is non-negative then SQLite takes that many
-** bytes (not characters) from the 2nd parameter as the error message.
-** ^The sqlite3_result_error() and sqlite3_result_error16()
-** routines make a private copy of the error message text before
-** they return. Hence, the calling function can deallocate or
-** modify the text after they return without harm.
-** ^The sqlite3_result_error_code() function changes the error code
-** returned by SQLite as a result of an error in a function. ^By default,
-** the error code is SQLITE_ERROR. ^A subsequent call to sqlite3_result_error()
-** or sqlite3_result_error16() resets the error code to SQLITE_ERROR.
-**
-** ^The sqlite3_result_error_toobig() interface causes SQLite to throw an
-** error indicating that a string or BLOB is too long to represent.
-**
-** ^The sqlite3_result_error_nomem() interface causes SQLite to throw an
-** error indicating that a memory allocation failed.
-**
-** ^The sqlite3_result_int() interface sets the return value
-** of the application-defined function to be the 32-bit signed integer
-** value given in the 2nd argument.
-** ^The sqlite3_result_int64() interface sets the return value
-** of the application-defined function to be the 64-bit signed integer
-** value given in the 2nd argument.
-**
-** ^The sqlite3_result_null() interface sets the return value
-** of the application-defined function to be NULL.
-**
-** ^The sqlite3_result_text(), sqlite3_result_text16(),
-** sqlite3_result_text16le(), and sqlite3_result_text16be() interfaces
-** set the return value of the application-defined function to be
-** a text string which is represented as UTF-8, UTF-16 native byte order,
-** UTF-16 little endian, or UTF-16 big endian, respectively.
-** ^The sqlite3_result_text64() interface sets the return value of an
-** application-defined function to be a text string in an encoding
-** specified by the fifth (and last) parameter, which must be one
-** of [SQLITE_UTF8], [SQLITE_UTF16], [SQLITE_UTF16BE], or [SQLITE_UTF16LE].
-** ^SQLite takes the text result from the application from
-** the 2nd parameter of the sqlite3_result_text* interfaces.
-** ^If the 3rd parameter to the sqlite3_result_text* interfaces
-** is negative, then SQLite takes result text from the 2nd parameter
-** through the first zero character.
-** ^If the 3rd parameter to the sqlite3_result_text* interfaces
-** is non-negative, then as many bytes (not characters) of the text
-** pointed to by the 2nd parameter are taken as the application-defined
-** function result. If the 3rd parameter is non-negative, then it
-** must be the byte offset into the string where the NUL terminator would
-** appear if the string where NUL terminated. If any NUL characters occur
-** in the string at a byte offset that is less than the value of the 3rd
-** parameter, then the resulting string will contain embedded NULs and the
-** result of expressions operating on strings with embedded NULs is undefined.
-** ^If the 4th parameter to the sqlite3_result_text* interfaces
-** or sqlite3_result_blob is a non-NULL pointer, then SQLite calls that
-** function as the destructor on the text or BLOB result when it has
-** finished using that result.
-** ^If the 4th parameter to the sqlite3_result_text* interfaces or to
-** sqlite3_result_blob is the special constant SQLITE_STATIC, then SQLite
-** assumes that the text or BLOB result is in constant space and does not
-** copy the content of the parameter nor call a destructor on the content
-** when it has finished using that result.
-** ^If the 4th parameter to the sqlite3_result_text* interfaces
-** or sqlite3_result_blob is the special constant SQLITE_TRANSIENT
-** then SQLite makes a copy of the result into space obtained from
-** from [sqlite3_malloc()] before it returns.
-**
-** ^The sqlite3_result_value() interface sets the result of
-** the application-defined function to be a copy of the
-** [unprotected sqlite3_value] object specified by the 2nd parameter. ^The
-** sqlite3_result_value() interface makes a copy of the [sqlite3_value]
-** so that the [sqlite3_value] specified in the parameter may change or
-** be deallocated after sqlite3_result_value() returns without harm.
-** ^A [protected sqlite3_value] object may always be used where an
-** [unprotected sqlite3_value] object is required, so either
-** kind of [sqlite3_value] object can be used with this interface.
-**
-** If these routines are called from within the different thread
-** than the one containing the application-defined function that received
-** the [sqlite3_context] pointer, the results are undefined.
-*/
-SQLITE_API void SQLITE_STDCALL sqlite3_result_blob(sqlite3_context*, const void*, int, void(*)(void*));
-SQLITE_API void SQLITE_STDCALL sqlite3_result_blob64(sqlite3_context*,const void*,
- sqlite3_uint64,void(*)(void*));
-SQLITE_API void SQLITE_STDCALL sqlite3_result_double(sqlite3_context*, double);
-SQLITE_API void SQLITE_STDCALL sqlite3_result_error(sqlite3_context*, const char*, int);
-SQLITE_API void SQLITE_STDCALL sqlite3_result_error16(sqlite3_context*, const void*, int);
-SQLITE_API void SQLITE_STDCALL sqlite3_result_error_toobig(sqlite3_context*);
-SQLITE_API void SQLITE_STDCALL sqlite3_result_error_nomem(sqlite3_context*);
-SQLITE_API void SQLITE_STDCALL sqlite3_result_error_code(sqlite3_context*, int);
-SQLITE_API void SQLITE_STDCALL sqlite3_result_int(sqlite3_context*, int);
-SQLITE_API void SQLITE_STDCALL sqlite3_result_int64(sqlite3_context*, sqlite3_int64);
-SQLITE_API void SQLITE_STDCALL sqlite3_result_null(sqlite3_context*);
-SQLITE_API void SQLITE_STDCALL sqlite3_result_text(sqlite3_context*, const char*, int, void(*)(void*));
-SQLITE_API void SQLITE_STDCALL sqlite3_result_text64(sqlite3_context*, const char*,sqlite3_uint64,
- void(*)(void*), unsigned char encoding);
-SQLITE_API void SQLITE_STDCALL sqlite3_result_text16(sqlite3_context*, const void*, int, void(*)(void*));
-SQLITE_API void SQLITE_STDCALL sqlite3_result_text16le(sqlite3_context*, const void*, int,void(*)(void*));
-SQLITE_API void SQLITE_STDCALL sqlite3_result_text16be(sqlite3_context*, const void*, int,void(*)(void*));
-SQLITE_API void SQLITE_STDCALL sqlite3_result_value(sqlite3_context*, sqlite3_value*);
-SQLITE_API void SQLITE_STDCALL sqlite3_result_zeroblob(sqlite3_context*, int n);
-SQLITE_API int SQLITE_STDCALL sqlite3_result_zeroblob64(sqlite3_context*, sqlite3_uint64 n);
-
-
-/*
-** CAPI3REF: Setting The Subtype Of An SQL Function
-** METHOD: sqlite3_context
-**
-** The sqlite3_result_subtype(C,T) function causes the subtype of
-** the result from the [application-defined SQL function] with
-** [sqlite3_context] C to be the value T. Only the lower 8 bits
-** of the subtype T are preserved in current versions of SQLite;
-** higher order bits are discarded.
-** The number of subtype bytes preserved by SQLite might increase
-** in future releases of SQLite.
-*/
-SQLITE_API void SQLITE_STDCALL sqlite3_result_subtype(sqlite3_context*,unsigned int);
-
-/*
-** CAPI3REF: Define New Collating Sequences
-** METHOD: sqlite3
-**
-** ^These functions add, remove, or modify a [collation] associated
-** with the [database connection] specified as the first argument.
-**
-** ^The name of the collation is a UTF-8 string
-** for sqlite3_create_collation() and sqlite3_create_collation_v2()
-** and a UTF-16 string in native byte order for sqlite3_create_collation16().
-** ^Collation names that compare equal according to [sqlite3_strnicmp()] are
-** considered to be the same name.
-**
-** ^(The third argument (eTextRep) must be one of the constants:
-**
-** [SQLITE_UTF8],
-** [SQLITE_UTF16LE],
-** [SQLITE_UTF16BE],
-** [SQLITE_UTF16], or
-** [SQLITE_UTF16_ALIGNED].
-** )^
-** ^The eTextRep argument determines the encoding of strings passed
-** to the collating function callback, xCallback.
-** ^The [SQLITE_UTF16] and [SQLITE_UTF16_ALIGNED] values for eTextRep
-** force strings to be UTF16 with native byte order.
-** ^The [SQLITE_UTF16_ALIGNED] value for eTextRep forces strings to begin
-** on an even byte address.
-**
-** ^The fourth argument, pArg, is an application data pointer that is passed
-** through as the first argument to the collating function callback.
-**
-** ^The fifth argument, xCallback, is a pointer to the collating function.
-** ^Multiple collating functions can be registered using the same name but
-** with different eTextRep parameters and SQLite will use whichever
-** function requires the least amount of data transformation.
-** ^If the xCallback argument is NULL then the collating function is
-** deleted. ^When all collating functions having the same name are deleted,
-** that collation is no longer usable.
-**
-** ^The collating function callback is invoked with a copy of the pArg
-** application data pointer and with two strings in the encoding specified
-** by the eTextRep argument. The collating function must return an
-** integer that is negative, zero, or positive
-** if the first string is less than, equal to, or greater than the second,
-** respectively. A collating function must always return the same answer
-** given the same inputs. If two or more collating functions are registered
-** to the same collation name (using different eTextRep values) then all
-** must give an equivalent answer when invoked with equivalent strings.
-** The collating function must obey the following properties for all
-** strings A, B, and C:
-**
-**
-** If A==B then B==A.
-** If A==B and B==C then A==C.
-** If A<B THEN B>A.
-** If A<B and B<C then A<C.
-**
-**
-** If a collating function fails any of the above constraints and that
-** collating function is registered and used, then the behavior of SQLite
-** is undefined.
-**
-** ^The sqlite3_create_collation_v2() works like sqlite3_create_collation()
-** with the addition that the xDestroy callback is invoked on pArg when
-** the collating function is deleted.
-** ^Collating functions are deleted when they are overridden by later
-** calls to the collation creation functions or when the
-** [database connection] is closed using [sqlite3_close()].
-**
-** ^The xDestroy callback is not called if the
-** sqlite3_create_collation_v2() function fails. Applications that invoke
-** sqlite3_create_collation_v2() with a non-NULL xDestroy argument should
-** check the return code and dispose of the application data pointer
-** themselves rather than expecting SQLite to deal with it for them.
-** This is different from every other SQLite interface. The inconsistency
-** is unfortunate but cannot be changed without breaking backwards
-** compatibility.
-**
-** See also: [sqlite3_collation_needed()] and [sqlite3_collation_needed16()].
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_create_collation(
- sqlite3*,
- const char *zName,
- int eTextRep,
- void *pArg,
- int(*xCompare)(void*,int,const void*,int,const void*)
-);
-SQLITE_API int SQLITE_STDCALL sqlite3_create_collation_v2(
- sqlite3*,
- const char *zName,
- int eTextRep,
- void *pArg,
- int(*xCompare)(void*,int,const void*,int,const void*),
- void(*xDestroy)(void*)
-);
-SQLITE_API int SQLITE_STDCALL sqlite3_create_collation16(
- sqlite3*,
- const void *zName,
- int eTextRep,
- void *pArg,
- int(*xCompare)(void*,int,const void*,int,const void*)
-);
-
-/*
-** CAPI3REF: Collation Needed Callbacks
-** METHOD: sqlite3
-**
-** ^To avoid having to register all collation sequences before a database
-** can be used, a single callback function may be registered with the
-** [database connection] to be invoked whenever an undefined collation
-** sequence is required.
-**
-** ^If the function is registered using the sqlite3_collation_needed() API,
-** then it is passed the names of undefined collation sequences as strings
-** encoded in UTF-8. ^If sqlite3_collation_needed16() is used,
-** the names are passed as UTF-16 in machine native byte order.
-** ^A call to either function replaces the existing collation-needed callback.
-**
-** ^(When the callback is invoked, the first argument passed is a copy
-** of the second argument to sqlite3_collation_needed() or
-** sqlite3_collation_needed16(). The second argument is the database
-** connection. The third argument is one of [SQLITE_UTF8], [SQLITE_UTF16BE],
-** or [SQLITE_UTF16LE], indicating the most desirable form of the collation
-** sequence function required. The fourth parameter is the name of the
-** required collation sequence.)^
-**
-** The callback function should register the desired collation using
-** [sqlite3_create_collation()], [sqlite3_create_collation16()], or
-** [sqlite3_create_collation_v2()].
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_collation_needed(
- sqlite3*,
- void*,
- void(*)(void*,sqlite3*,int eTextRep,const char*)
-);
-SQLITE_API int SQLITE_STDCALL sqlite3_collation_needed16(
- sqlite3*,
- void*,
- void(*)(void*,sqlite3*,int eTextRep,const void*)
-);
-
-#ifdef SQLITE_HAS_CODEC
-/*
-** Specify the key for an encrypted database. This routine should be
-** called right after sqlite3_open().
-**
-** The code to implement this API is not available in the public release
-** of SQLite.
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_key(
- sqlite3 *db, /* Database to be rekeyed */
- const void *pKey, int nKey /* The key */
-);
-SQLITE_API int SQLITE_STDCALL sqlite3_key_v2(
- sqlite3 *db, /* Database to be rekeyed */
- const char *zDbName, /* Name of the database */
- const void *pKey, int nKey /* The key */
-);
-
-/*
-** Change the key on an open database. If the current database is not
-** encrypted, this routine will encrypt it. If pNew==0 or nNew==0, the
-** database is decrypted.
-**
-** The code to implement this API is not available in the public release
-** of SQLite.
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_rekey(
- sqlite3 *db, /* Database to be rekeyed */
- const void *pKey, int nKey /* The new key */
-);
-SQLITE_API int SQLITE_STDCALL sqlite3_rekey_v2(
- sqlite3 *db, /* Database to be rekeyed */
- const char *zDbName, /* Name of the database */
- const void *pKey, int nKey /* The new key */
-);
-
-/*
-** Specify the activation key for a SEE database. Unless
-** activated, none of the SEE routines will work.
-*/
-SQLITE_API void SQLITE_STDCALL sqlite3_activate_see(
- const char *zPassPhrase /* Activation phrase */
-);
-#endif
-
-#ifdef SQLITE_ENABLE_CEROD
-/*
-** Specify the activation key for a CEROD database. Unless
-** activated, none of the CEROD routines will work.
-*/
-SQLITE_API void SQLITE_STDCALL sqlite3_activate_cerod(
- const char *zPassPhrase /* Activation phrase */
-);
-#endif
-
-/*
-** CAPI3REF: Suspend Execution For A Short Time
-**
-** The sqlite3_sleep() function causes the current thread to suspend execution
-** for at least a number of milliseconds specified in its parameter.
-**
-** If the operating system does not support sleep requests with
-** millisecond time resolution, then the time will be rounded up to
-** the nearest second. The number of milliseconds of sleep actually
-** requested from the operating system is returned.
-**
-** ^SQLite implements this interface by calling the xSleep()
-** method of the default [sqlite3_vfs] object. If the xSleep() method
-** of the default VFS is not implemented correctly, or not implemented at
-** all, then the behavior of sqlite3_sleep() may deviate from the description
-** in the previous paragraphs.
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_sleep(int);
-
-/*
-** CAPI3REF: Name Of The Folder Holding Temporary Files
-**
-** ^(If this global variable is made to point to a string which is
-** the name of a folder (a.k.a. directory), then all temporary files
-** created by SQLite when using a built-in [sqlite3_vfs | VFS]
-** will be placed in that directory.)^ ^If this variable
-** is a NULL pointer, then SQLite performs a search for an appropriate
-** temporary file directory.
-**
-** Applications are strongly discouraged from using this global variable.
-** It is required to set a temporary folder on Windows Runtime (WinRT).
-** But for all other platforms, it is highly recommended that applications
-** neither read nor write this variable. This global variable is a relic
-** that exists for backwards compatibility of legacy applications and should
-** be avoided in new projects.
-**
-** It is not safe to read or modify this variable in more than one
-** thread at a time. It is not safe to read or modify this variable
-** if a [database connection] is being used at the same time in a separate
-** thread.
-** It is intended that this variable be set once
-** as part of process initialization and before any SQLite interface
-** routines have been called and that this variable remain unchanged
-** thereafter.
-**
-** ^The [temp_store_directory pragma] may modify this variable and cause
-** it to point to memory obtained from [sqlite3_malloc]. ^Furthermore,
-** the [temp_store_directory pragma] always assumes that any string
-** that this variable points to is held in memory obtained from
-** [sqlite3_malloc] and the pragma may attempt to free that memory
-** using [sqlite3_free].
-** Hence, if this variable is modified directly, either it should be
-** made NULL or made to point to memory obtained from [sqlite3_malloc]
-** or else the use of the [temp_store_directory pragma] should be avoided.
-** Except when requested by the [temp_store_directory pragma], SQLite
-** does not free the memory that sqlite3_temp_directory points to. If
-** the application wants that memory to be freed, it must do
-** so itself, taking care to only do so after all [database connection]
-** objects have been destroyed.
-**
-** Note to Windows Runtime users: The temporary directory must be set
-** prior to calling [sqlite3_open] or [sqlite3_open_v2]. Otherwise, various
-** features that require the use of temporary files may fail. Here is an
-** example of how to do this using C++ with the Windows Runtime:
-**
-**
-** LPCWSTR zPath = Windows::Storage::ApplicationData::Current->
-** TemporaryFolder->Path->Data();
-** char zPathBuf[MAX_PATH + 1];
-** memset(zPathBuf, 0, sizeof(zPathBuf));
-** WideCharToMultiByte(CP_UTF8, 0, zPath, -1, zPathBuf, sizeof(zPathBuf),
-** NULL, NULL);
-** sqlite3_temp_directory = sqlite3_mprintf("%s", zPathBuf);
-**
-*/
-SQLITE_API char *sqlite3_temp_directory;
-
-/*
-** CAPI3REF: Name Of The Folder Holding Database Files
-**
-** ^(If this global variable is made to point to a string which is
-** the name of a folder (a.k.a. directory), then all database files
-** specified with a relative pathname and created or accessed by
-** SQLite when using a built-in windows [sqlite3_vfs | VFS] will be assumed
-** to be relative to that directory.)^ ^If this variable is a NULL
-** pointer, then SQLite assumes that all database files specified
-** with a relative pathname are relative to the current directory
-** for the process. Only the windows VFS makes use of this global
-** variable; it is ignored by the unix VFS.
-**
-** Changing the value of this variable while a database connection is
-** open can result in a corrupt database.
-**
-** It is not safe to read or modify this variable in more than one
-** thread at a time. It is not safe to read or modify this variable
-** if a [database connection] is being used at the same time in a separate
-** thread.
-** It is intended that this variable be set once
-** as part of process initialization and before any SQLite interface
-** routines have been called and that this variable remain unchanged
-** thereafter.
-**
-** ^The [data_store_directory pragma] may modify this variable and cause
-** it to point to memory obtained from [sqlite3_malloc]. ^Furthermore,
-** the [data_store_directory pragma] always assumes that any string
-** that this variable points to is held in memory obtained from
-** [sqlite3_malloc] and the pragma may attempt to free that memory
-** using [sqlite3_free].
-** Hence, if this variable is modified directly, either it should be
-** made NULL or made to point to memory obtained from [sqlite3_malloc]
-** or else the use of the [data_store_directory pragma] should be avoided.
-*/
-SQLITE_API char *sqlite3_data_directory;
-
-/*
-** CAPI3REF: Test For Auto-Commit Mode
-** KEYWORDS: {autocommit mode}
-** METHOD: sqlite3
-**
-** ^The sqlite3_get_autocommit() interface returns non-zero or
-** zero if the given database connection is or is not in autocommit mode,
-** respectively. ^Autocommit mode is on by default.
-** ^Autocommit mode is disabled by a [BEGIN] statement.
-** ^Autocommit mode is re-enabled by a [COMMIT] or [ROLLBACK].
-**
-** If certain kinds of errors occur on a statement within a multi-statement
-** transaction (errors including [SQLITE_FULL], [SQLITE_IOERR],
-** [SQLITE_NOMEM], [SQLITE_BUSY], and [SQLITE_INTERRUPT]) then the
-** transaction might be rolled back automatically. The only way to
-** find out whether SQLite automatically rolled back the transaction after
-** an error is to use this function.
-**
-** If another thread changes the autocommit status of the database
-** connection while this routine is running, then the return value
-** is undefined.
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_get_autocommit(sqlite3*);
-
-/*
-** CAPI3REF: Find The Database Handle Of A Prepared Statement
-** METHOD: sqlite3_stmt
-**
-** ^The sqlite3_db_handle interface returns the [database connection] handle
-** to which a [prepared statement] belongs. ^The [database connection]
-** returned by sqlite3_db_handle is the same [database connection]
-** that was the first argument
-** to the [sqlite3_prepare_v2()] call (or its variants) that was used to
-** create the statement in the first place.
-*/
-SQLITE_API sqlite3 *SQLITE_STDCALL sqlite3_db_handle(sqlite3_stmt*);
-
-/*
-** CAPI3REF: Return The Filename For A Database Connection
-** METHOD: sqlite3
-**
-** ^The sqlite3_db_filename(D,N) interface returns a pointer to a filename
-** associated with database N of connection D. ^The main database file
-** has the name "main". If there is no attached database N on the database
-** connection D, or if database N is a temporary or in-memory database, then
-** a NULL pointer is returned.
-**
-** ^The filename returned by this function is the output of the
-** xFullPathname method of the [VFS]. ^In other words, the filename
-** will be an absolute pathname, even if the filename used
-** to open the database originally was a URI or relative pathname.
-*/
-SQLITE_API const char *SQLITE_STDCALL sqlite3_db_filename(sqlite3 *db, const char *zDbName);
-
-/*
-** CAPI3REF: Determine if a database is read-only
-** METHOD: sqlite3
-**
-** ^The sqlite3_db_readonly(D,N) interface returns 1 if the database N
-** of connection D is read-only, 0 if it is read/write, or -1 if N is not
-** the name of a database on connection D.
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_db_readonly(sqlite3 *db, const char *zDbName);
-
-/*
-** CAPI3REF: Find the next prepared statement
-** METHOD: sqlite3
-**
-** ^This interface returns a pointer to the next [prepared statement] after
-** pStmt associated with the [database connection] pDb. ^If pStmt is NULL
-** then this interface returns a pointer to the first prepared statement
-** associated with the database connection pDb. ^If no prepared statement
-** satisfies the conditions of this routine, it returns NULL.
-**
-** The [database connection] pointer D in a call to
-** [sqlite3_next_stmt(D,S)] must refer to an open database
-** connection and in particular must not be a NULL pointer.
-*/
-SQLITE_API sqlite3_stmt *SQLITE_STDCALL sqlite3_next_stmt(sqlite3 *pDb, sqlite3_stmt *pStmt);
-
-/*
-** CAPI3REF: Commit And Rollback Notification Callbacks
-** METHOD: sqlite3
-**
-** ^The sqlite3_commit_hook() interface registers a callback
-** function to be invoked whenever a transaction is [COMMIT | committed].
-** ^Any callback set by a previous call to sqlite3_commit_hook()
-** for the same database connection is overridden.
-** ^The sqlite3_rollback_hook() interface registers a callback
-** function to be invoked whenever a transaction is [ROLLBACK | rolled back].
-** ^Any callback set by a previous call to sqlite3_rollback_hook()
-** for the same database connection is overridden.
-** ^The pArg argument is passed through to the callback.
-** ^If the callback on a commit hook function returns non-zero,
-** then the commit is converted into a rollback.
-**
-** ^The sqlite3_commit_hook(D,C,P) and sqlite3_rollback_hook(D,C,P) functions
-** return the P argument from the previous call of the same function
-** on the same [database connection] D, or NULL for
-** the first call for each function on D.
-**
-** The commit and rollback hook callbacks are not reentrant.
-** The callback implementation must not do anything that will modify
-** the database connection that invoked the callback. Any actions
-** to modify the database connection must be deferred until after the
-** completion of the [sqlite3_step()] call that triggered the commit
-** or rollback hook in the first place.
-** Note that running any other SQL statements, including SELECT statements,
-** or merely calling [sqlite3_prepare_v2()] and [sqlite3_step()] will modify
-** the database connections for the meaning of "modify" in this paragraph.
-**
-** ^Registering a NULL function disables the callback.
-**
-** ^When the commit hook callback routine returns zero, the [COMMIT]
-** operation is allowed to continue normally. ^If the commit hook
-** returns non-zero, then the [COMMIT] is converted into a [ROLLBACK].
-** ^The rollback hook is invoked on a rollback that results from a commit
-** hook returning non-zero, just as it would be with any other rollback.
-**
-** ^For the purposes of this API, a transaction is said to have been
-** rolled back if an explicit "ROLLBACK" statement is executed, or
-** an error or constraint causes an implicit rollback to occur.
-** ^The rollback callback is not invoked if a transaction is
-** automatically rolled back because the database connection is closed.
-**
-** See also the [sqlite3_update_hook()] interface.
-*/
-SQLITE_API void *SQLITE_STDCALL sqlite3_commit_hook(sqlite3*, int(*)(void*), void*);
-SQLITE_API void *SQLITE_STDCALL sqlite3_rollback_hook(sqlite3*, void(*)(void *), void*);
-
-/*
-** CAPI3REF: Data Change Notification Callbacks
-** METHOD: sqlite3
-**
-** ^The sqlite3_update_hook() interface registers a callback function
-** with the [database connection] identified by the first argument
-** to be invoked whenever a row is updated, inserted or deleted in
-** a [rowid table].
-** ^Any callback set by a previous call to this function
-** for the same database connection is overridden.
-**
-** ^The second argument is a pointer to the function to invoke when a
-** row is updated, inserted or deleted in a rowid table.
-** ^The first argument to the callback is a copy of the third argument
-** to sqlite3_update_hook().
-** ^The second callback argument is one of [SQLITE_INSERT], [SQLITE_DELETE],
-** or [SQLITE_UPDATE], depending on the operation that caused the callback
-** to be invoked.
-** ^The third and fourth arguments to the callback contain pointers to the
-** database and table name containing the affected row.
-** ^The final callback parameter is the [rowid] of the row.
-** ^In the case of an update, this is the [rowid] after the update takes place.
-**
-** ^(The update hook is not invoked when internal system tables are
-** modified (i.e. sqlite_master and sqlite_sequence).)^
-** ^The update hook is not invoked when [WITHOUT ROWID] tables are modified.
-**
-** ^In the current implementation, the update hook
-** is not invoked when duplication rows are deleted because of an
-** [ON CONFLICT | ON CONFLICT REPLACE] clause. ^Nor is the update hook
-** invoked when rows are deleted using the [truncate optimization].
-** The exceptions defined in this paragraph might change in a future
-** release of SQLite.
-**
-** The update hook implementation must not do anything that will modify
-** the database connection that invoked the update hook. Any actions
-** to modify the database connection must be deferred until after the
-** completion of the [sqlite3_step()] call that triggered the update hook.
-** Note that [sqlite3_prepare_v2()] and [sqlite3_step()] both modify their
-** database connections for the meaning of "modify" in this paragraph.
-**
-** ^The sqlite3_update_hook(D,C,P) function
-** returns the P argument from the previous call
-** on the same [database connection] D, or NULL for
-** the first call on D.
-**
-** See also the [sqlite3_commit_hook()], [sqlite3_rollback_hook()],
-** and [sqlite3_preupdate_hook()] interfaces.
-*/
-SQLITE_API void *SQLITE_STDCALL sqlite3_update_hook(
- sqlite3*,
- void(*)(void *,int ,char const *,char const *,sqlite3_int64),
- void*
-);
-
-/*
-** CAPI3REF: Enable Or Disable Shared Pager Cache
-**
-** ^(This routine enables or disables the sharing of the database cache
-** and schema data structures between [database connection | connections]
-** to the same database. Sharing is enabled if the argument is true
-** and disabled if the argument is false.)^
-**
-** ^Cache sharing is enabled and disabled for an entire process.
-** This is a change as of SQLite version 3.5.0. In prior versions of SQLite,
-** sharing was enabled or disabled for each thread separately.
-**
-** ^(The cache sharing mode set by this interface effects all subsequent
-** calls to [sqlite3_open()], [sqlite3_open_v2()], and [sqlite3_open16()].
-** Existing database connections continue use the sharing mode
-** that was in effect at the time they were opened.)^
-**
-** ^(This routine returns [SQLITE_OK] if shared cache was enabled or disabled
-** successfully. An [error code] is returned otherwise.)^
-**
-** ^Shared cache is disabled by default. But this might change in
-** future releases of SQLite. Applications that care about shared
-** cache setting should set it explicitly.
-**
-** Note: This method is disabled on MacOS X 10.7 and iOS version 5.0
-** and will always return SQLITE_MISUSE. On those systems,
-** shared cache mode should be enabled per-database connection via
-** [sqlite3_open_v2()] with [SQLITE_OPEN_SHAREDCACHE].
-**
-** This interface is threadsafe on processors where writing a
-** 32-bit integer is atomic.
-**
-** See Also: [SQLite Shared-Cache Mode]
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_enable_shared_cache(int);
-
-/*
-** CAPI3REF: Attempt To Free Heap Memory
-**
-** ^The sqlite3_release_memory() interface attempts to free N bytes
-** of heap memory by deallocating non-essential memory allocations
-** held by the database library. Memory used to cache database
-** pages to improve performance is an example of non-essential memory.
-** ^sqlite3_release_memory() returns the number of bytes actually freed,
-** which might be more or less than the amount requested.
-** ^The sqlite3_release_memory() routine is a no-op returning zero
-** if SQLite is not compiled with [SQLITE_ENABLE_MEMORY_MANAGEMENT].
-**
-** See also: [sqlite3_db_release_memory()]
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_release_memory(int);
-
-/*
-** CAPI3REF: Free Memory Used By A Database Connection
-** METHOD: sqlite3
-**
-** ^The sqlite3_db_release_memory(D) interface attempts to free as much heap
-** memory as possible from database connection D. Unlike the
-** [sqlite3_release_memory()] interface, this interface is in effect even
-** when the [SQLITE_ENABLE_MEMORY_MANAGEMENT] compile-time option is
-** omitted.
-**
-** See also: [sqlite3_release_memory()]
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_db_release_memory(sqlite3*);
-
-/*
-** CAPI3REF: Impose A Limit On Heap Size
-**
-** ^The sqlite3_soft_heap_limit64() interface sets and/or queries the
-** soft limit on the amount of heap memory that may be allocated by SQLite.
-** ^SQLite strives to keep heap memory utilization below the soft heap
-** limit by reducing the number of pages held in the page cache
-** as heap memory usages approaches the limit.
-** ^The soft heap limit is "soft" because even though SQLite strives to stay
-** below the limit, it will exceed the limit rather than generate
-** an [SQLITE_NOMEM] error. In other words, the soft heap limit
-** is advisory only.
-**
-** ^The return value from sqlite3_soft_heap_limit64() is the size of
-** the soft heap limit prior to the call, or negative in the case of an
-** error. ^If the argument N is negative
-** then no change is made to the soft heap limit. Hence, the current
-** size of the soft heap limit can be determined by invoking
-** sqlite3_soft_heap_limit64() with a negative argument.
-**
-** ^If the argument N is zero then the soft heap limit is disabled.
-**
-** ^(The soft heap limit is not enforced in the current implementation
-** if one or more of following conditions are true:
-**
-**
-** The soft heap limit is set to zero.
-** Memory accounting is disabled using a combination of the
-** [sqlite3_config]([SQLITE_CONFIG_MEMSTATUS],...) start-time option and
-** the [SQLITE_DEFAULT_MEMSTATUS] compile-time option.
-** An alternative page cache implementation is specified using
-** [sqlite3_config]([SQLITE_CONFIG_PCACHE2],...).
-** The page cache allocates from its own memory pool supplied
-** by [sqlite3_config]([SQLITE_CONFIG_PAGECACHE],...) rather than
-** from the heap.
-** )^
-**
-** Beginning with SQLite version 3.7.3, the soft heap limit is enforced
-** regardless of whether or not the [SQLITE_ENABLE_MEMORY_MANAGEMENT]
-** compile-time option is invoked. With [SQLITE_ENABLE_MEMORY_MANAGEMENT],
-** the soft heap limit is enforced on every memory allocation. Without
-** [SQLITE_ENABLE_MEMORY_MANAGEMENT], the soft heap limit is only enforced
-** when memory is allocated by the page cache. Testing suggests that because
-** the page cache is the predominate memory user in SQLite, most
-** applications will achieve adequate soft heap limit enforcement without
-** the use of [SQLITE_ENABLE_MEMORY_MANAGEMENT].
-**
-** The circumstances under which SQLite will enforce the soft heap limit may
-** changes in future releases of SQLite.
-*/
-SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_soft_heap_limit64(sqlite3_int64 N);
-
-/*
-** CAPI3REF: Deprecated Soft Heap Limit Interface
-** DEPRECATED
-**
-** This is a deprecated version of the [sqlite3_soft_heap_limit64()]
-** interface. This routine is provided for historical compatibility
-** only. All new applications should use the
-** [sqlite3_soft_heap_limit64()] interface rather than this one.
-*/
-SQLITE_API SQLITE_DEPRECATED void SQLITE_STDCALL sqlite3_soft_heap_limit(int N);
-
-
-/*
-** CAPI3REF: Extract Metadata About A Column Of A Table
-** METHOD: sqlite3
-**
-** ^(The sqlite3_table_column_metadata(X,D,T,C,....) routine returns
-** information about column C of table T in database D
-** on [database connection] X.)^ ^The sqlite3_table_column_metadata()
-** interface returns SQLITE_OK and fills in the non-NULL pointers in
-** the final five arguments with appropriate values if the specified
-** column exists. ^The sqlite3_table_column_metadata() interface returns
-** SQLITE_ERROR and if the specified column does not exist.
-** ^If the column-name parameter to sqlite3_table_column_metadata() is a
-** NULL pointer, then this routine simply checks for the existence of the
-** table and returns SQLITE_OK if the table exists and SQLITE_ERROR if it
-** does not.
-**
-** ^The column is identified by the second, third and fourth parameters to
-** this function. ^(The second parameter is either the name of the database
-** (i.e. "main", "temp", or an attached database) containing the specified
-** table or NULL.)^ ^If it is NULL, then all attached databases are searched
-** for the table using the same algorithm used by the database engine to
-** resolve unqualified table references.
-**
-** ^The third and fourth parameters to this function are the table and column
-** name of the desired column, respectively.
-**
-** ^Metadata is returned by writing to the memory locations passed as the 5th
-** and subsequent parameters to this function. ^Any of these arguments may be
-** NULL, in which case the corresponding element of metadata is omitted.
-**
-** ^(
-**
-** Parameter Output Type Description
-**
-** 5th const char* Data type
-** 6th const char* Name of default collation sequence
-** 7th int True if column has a NOT NULL constraint
-** 8th int True if column is part of the PRIMARY KEY
-** 9th int True if column is [AUTOINCREMENT]
-**
-** )^
-**
-** ^The memory pointed to by the character pointers returned for the
-** declaration type and collation sequence is valid until the next
-** call to any SQLite API function.
-**
-** ^If the specified table is actually a view, an [error code] is returned.
-**
-** ^If the specified column is "rowid", "oid" or "_rowid_" and the table
-** is not a [WITHOUT ROWID] table and an
-** [INTEGER PRIMARY KEY] column has been explicitly declared, then the output
-** parameters are set for the explicitly declared column. ^(If there is no
-** [INTEGER PRIMARY KEY] column, then the outputs
-** for the [rowid] are set as follows:
-**
-**
-** data type: "INTEGER"
-** collation sequence: "BINARY"
-** not null: 0
-** primary key: 1
-** auto increment: 0
-** )^
-**
-** ^This function causes all database schemas to be read from disk and
-** parsed, if that has not already been done, and returns an error if
-** any errors are encountered while loading the schema.
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_table_column_metadata(
- sqlite3 *db, /* Connection handle */
- const char *zDbName, /* Database name or NULL */
- const char *zTableName, /* Table name */
- const char *zColumnName, /* Column name */
- char const **pzDataType, /* OUTPUT: Declared data type */
- char const **pzCollSeq, /* OUTPUT: Collation sequence name */
- int *pNotNull, /* OUTPUT: True if NOT NULL constraint exists */
- int *pPrimaryKey, /* OUTPUT: True if column part of PK */
- int *pAutoinc /* OUTPUT: True if column is auto-increment */
-);
-
-/*
-** CAPI3REF: Load An Extension
-** METHOD: sqlite3
-**
-** ^This interface loads an SQLite extension library from the named file.
-**
-** ^The sqlite3_load_extension() interface attempts to load an
-** [SQLite extension] library contained in the file zFile. If
-** the file cannot be loaded directly, attempts are made to load
-** with various operating-system specific extensions added.
-** So for example, if "samplelib" cannot be loaded, then names like
-** "samplelib.so" or "samplelib.dylib" or "samplelib.dll" might
-** be tried also.
-**
-** ^The entry point is zProc.
-** ^(zProc may be 0, in which case SQLite will try to come up with an
-** entry point name on its own. It first tries "sqlite3_extension_init".
-** If that does not work, it constructs a name "sqlite3_X_init" where the
-** X is consists of the lower-case equivalent of all ASCII alphabetic
-** characters in the filename from the last "/" to the first following
-** "." and omitting any initial "lib".)^
-** ^The sqlite3_load_extension() interface returns
-** [SQLITE_OK] on success and [SQLITE_ERROR] if something goes wrong.
-** ^If an error occurs and pzErrMsg is not 0, then the
-** [sqlite3_load_extension()] interface shall attempt to
-** fill *pzErrMsg with error message text stored in memory
-** obtained from [sqlite3_malloc()]. The calling function
-** should free this memory by calling [sqlite3_free()].
-**
-** ^Extension loading must be enabled using
-** [sqlite3_enable_load_extension()] or
-** [sqlite3_db_config](db,[SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION],1,NULL)
-** prior to calling this API,
-** otherwise an error will be returned.
-**
-** Security warning: It is recommended that the
-** [SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION] method be used to enable only this
-** interface. The use of the [sqlite3_enable_load_extension()] interface
-** should be avoided. This will keep the SQL function [load_extension()]
-** disabled and prevent SQL injections from giving attackers
-** access to extension loading capabilities.
-**
-** See also the [load_extension() SQL function].
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_load_extension(
- sqlite3 *db, /* Load the extension into this database connection */
- const char *zFile, /* Name of the shared library containing extension */
- const char *zProc, /* Entry point. Derived from zFile if 0 */
- char **pzErrMsg /* Put error message here if not 0 */
-);
-
-/*
-** CAPI3REF: Enable Or Disable Extension Loading
-** METHOD: sqlite3
-**
-** ^So as not to open security holes in older applications that are
-** unprepared to deal with [extension loading], and as a means of disabling
-** [extension loading] while evaluating user-entered SQL, the following API
-** is provided to turn the [sqlite3_load_extension()] mechanism on and off.
-**
-** ^Extension loading is off by default.
-** ^Call the sqlite3_enable_load_extension() routine with onoff==1
-** to turn extension loading on and call it with onoff==0 to turn
-** it back off again.
-**
-** ^This interface enables or disables both the C-API
-** [sqlite3_load_extension()] and the SQL function [load_extension()].
-** ^(Use [sqlite3_db_config](db,[SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION],..)
-** to enable or disable only the C-API.)^
-**
-** Security warning: It is recommended that extension loading
-** be disabled using the [SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION] method
-** rather than this interface, so the [load_extension()] SQL function
-** remains disabled. This will prevent SQL injections from giving attackers
-** access to extension loading capabilities.
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_enable_load_extension(sqlite3 *db, int onoff);
-
-/*
-** CAPI3REF: Automatically Load Statically Linked Extensions
-**
-** ^This interface causes the xEntryPoint() function to be invoked for
-** each new [database connection] that is created. The idea here is that
-** xEntryPoint() is the entry point for a statically linked [SQLite extension]
-** that is to be automatically loaded into all new database connections.
-**
-** ^(Even though the function prototype shows that xEntryPoint() takes
-** no arguments and returns void, SQLite invokes xEntryPoint() with three
-** arguments and expects an integer result as if the signature of the
-** entry point where as follows:
-**
-**
-** int xEntryPoint(
-** sqlite3 *db,
-** const char **pzErrMsg,
-** const struct sqlite3_api_routines *pThunk
-** );
-** )^
-**
-** If the xEntryPoint routine encounters an error, it should make *pzErrMsg
-** point to an appropriate error message (obtained from [sqlite3_mprintf()])
-** and return an appropriate [error code]. ^SQLite ensures that *pzErrMsg
-** is NULL before calling the xEntryPoint(). ^SQLite will invoke
-** [sqlite3_free()] on *pzErrMsg after xEntryPoint() returns. ^If any
-** xEntryPoint() returns an error, the [sqlite3_open()], [sqlite3_open16()],
-** or [sqlite3_open_v2()] call that provoked the xEntryPoint() will fail.
-**
-** ^Calling sqlite3_auto_extension(X) with an entry point X that is already
-** on the list of automatic extensions is a harmless no-op. ^No entry point
-** will be called more than once for each database connection that is opened.
-**
-** See also: [sqlite3_reset_auto_extension()]
-** and [sqlite3_cancel_auto_extension()]
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_auto_extension(void(*xEntryPoint)(void));
-
-/*
-** CAPI3REF: Cancel Automatic Extension Loading
-**
-** ^The [sqlite3_cancel_auto_extension(X)] interface unregisters the
-** initialization routine X that was registered using a prior call to
-** [sqlite3_auto_extension(X)]. ^The [sqlite3_cancel_auto_extension(X)]
-** routine returns 1 if initialization routine X was successfully
-** unregistered and it returns 0 if X was not on the list of initialization
-** routines.
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_cancel_auto_extension(void(*xEntryPoint)(void));
-
-/*
-** CAPI3REF: Reset Automatic Extension Loading
-**
-** ^This interface disables all automatic extensions previously
-** registered using [sqlite3_auto_extension()].
-*/
-SQLITE_API void SQLITE_STDCALL sqlite3_reset_auto_extension(void);
-
-/*
-** The interface to the virtual-table mechanism is currently considered
-** to be experimental. The interface might change in incompatible ways.
-** If this is a problem for you, do not use the interface at this time.
-**
-** When the virtual-table mechanism stabilizes, we will declare the
-** interface fixed, support it indefinitely, and remove this comment.
-*/
-
-/*
-** Structures used by the virtual table interface
-*/
-typedef struct sqlite3_vtab sqlite3_vtab;
-typedef struct sqlite3_index_info sqlite3_index_info;
-typedef struct sqlite3_vtab_cursor sqlite3_vtab_cursor;
-typedef struct sqlite3_module sqlite3_module;
-
-/*
-** CAPI3REF: Virtual Table Object
-** KEYWORDS: sqlite3_module {virtual table module}
-**
-** This structure, sometimes called a "virtual table module",
-** defines the implementation of a [virtual tables].
-** This structure consists mostly of methods for the module.
-**
-** ^A virtual table module is created by filling in a persistent
-** instance of this structure and passing a pointer to that instance
-** to [sqlite3_create_module()] or [sqlite3_create_module_v2()].
-** ^The registration remains valid until it is replaced by a different
-** module or until the [database connection] closes. The content
-** of this structure must not change while it is registered with
-** any database connection.
-*/
-struct sqlite3_module {
- int iVersion;
- int (*xCreate)(sqlite3*, void *pAux,
- int argc, const char *const*argv,
- sqlite3_vtab **ppVTab, char**);
- int (*xConnect)(sqlite3*, void *pAux,
- int argc, const char *const*argv,
- sqlite3_vtab **ppVTab, char**);
- int (*xBestIndex)(sqlite3_vtab *pVTab, sqlite3_index_info*);
- int (*xDisconnect)(sqlite3_vtab *pVTab);
- int (*xDestroy)(sqlite3_vtab *pVTab);
- int (*xOpen)(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor);
- int (*xClose)(sqlite3_vtab_cursor*);
- int (*xFilter)(sqlite3_vtab_cursor*, int idxNum, const char *idxStr,
- int argc, sqlite3_value **argv);
- int (*xNext)(sqlite3_vtab_cursor*);
- int (*xEof)(sqlite3_vtab_cursor*);
- int (*xColumn)(sqlite3_vtab_cursor*, sqlite3_context*, int);
- int (*xRowid)(sqlite3_vtab_cursor*, sqlite3_int64 *pRowid);
- int (*xUpdate)(sqlite3_vtab *, int, sqlite3_value **, sqlite3_int64 *);
- int (*xBegin)(sqlite3_vtab *pVTab);
- int (*xSync)(sqlite3_vtab *pVTab);
- int (*xCommit)(sqlite3_vtab *pVTab);
- int (*xRollback)(sqlite3_vtab *pVTab);
- int (*xFindFunction)(sqlite3_vtab *pVtab, int nArg, const char *zName,
- void (**pxFunc)(sqlite3_context*,int,sqlite3_value**),
- void **ppArg);
- int (*xRename)(sqlite3_vtab *pVtab, const char *zNew);
- /* The methods above are in version 1 of the sqlite_module object. Those
- ** below are for version 2 and greater. */
- int (*xSavepoint)(sqlite3_vtab *pVTab, int);
- int (*xRelease)(sqlite3_vtab *pVTab, int);
- int (*xRollbackTo)(sqlite3_vtab *pVTab, int);
-};
-
-/*
-** CAPI3REF: Virtual Table Indexing Information
-** KEYWORDS: sqlite3_index_info
-**
-** The sqlite3_index_info structure and its substructures is used as part
-** of the [virtual table] interface to
-** pass information into and receive the reply from the [xBestIndex]
-** method of a [virtual table module]. The fields under **Inputs** are the
-** inputs to xBestIndex and are read-only. xBestIndex inserts its
-** results into the **Outputs** fields.
-**
-** ^(The aConstraint[] array records WHERE clause constraints of the form:
-**
-** column OP expr
-**
-** where OP is =, <, <=, >, or >=.)^ ^(The particular operator is
-** stored in aConstraint[].op using one of the
-** [SQLITE_INDEX_CONSTRAINT_EQ | SQLITE_INDEX_CONSTRAINT_ values].)^
-** ^(The index of the column is stored in
-** aConstraint[].iColumn.)^ ^(aConstraint[].usable is TRUE if the
-** expr on the right-hand side can be evaluated (and thus the constraint
-** is usable) and false if it cannot.)^
-**
-** ^The optimizer automatically inverts terms of the form "expr OP column"
-** and makes other simplifications to the WHERE clause in an attempt to
-** get as many WHERE clause terms into the form shown above as possible.
-** ^The aConstraint[] array only reports WHERE clause terms that are
-** relevant to the particular virtual table being queried.
-**
-** ^Information about the ORDER BY clause is stored in aOrderBy[].
-** ^Each term of aOrderBy records a column of the ORDER BY clause.
-**
-** The colUsed field indicates which columns of the virtual table may be
-** required by the current scan. Virtual table columns are numbered from
-** zero in the order in which they appear within the CREATE TABLE statement
-** passed to sqlite3_declare_vtab(). For the first 63 columns (columns 0-62),
-** the corresponding bit is set within the colUsed mask if the column may be
-** required by SQLite. If the table has at least 64 columns and any column
-** to the right of the first 63 is required, then bit 63 of colUsed is also
-** set. In other words, column iCol may be required if the expression
-** (colUsed & ((sqlite3_uint64)1 << (iCol>=63 ? 63 : iCol))) evaluates to
-** non-zero.
-**
-** The [xBestIndex] method must fill aConstraintUsage[] with information
-** about what parameters to pass to xFilter. ^If argvIndex>0 then
-** the right-hand side of the corresponding aConstraint[] is evaluated
-** and becomes the argvIndex-th entry in argv. ^(If aConstraintUsage[].omit
-** is true, then the constraint is assumed to be fully handled by the
-** virtual table and is not checked again by SQLite.)^
-**
-** ^The idxNum and idxPtr values are recorded and passed into the
-** [xFilter] method.
-** ^[sqlite3_free()] is used to free idxPtr if and only if
-** needToFreeIdxPtr is true.
-**
-** ^The orderByConsumed means that output from [xFilter]/[xNext] will occur in
-** the correct order to satisfy the ORDER BY clause so that no separate
-** sorting step is required.
-**
-** ^The estimatedCost value is an estimate of the cost of a particular
-** strategy. A cost of N indicates that the cost of the strategy is similar
-** to a linear scan of an SQLite table with N rows. A cost of log(N)
-** indicates that the expense of the operation is similar to that of a
-** binary search on a unique indexed field of an SQLite table with N rows.
-**
-** ^The estimatedRows value is an estimate of the number of rows that
-** will be returned by the strategy.
-**
-** The xBestIndex method may optionally populate the idxFlags field with a
-** mask of SQLITE_INDEX_SCAN_* flags. Currently there is only one such flag -
-** SQLITE_INDEX_SCAN_UNIQUE. If the xBestIndex method sets this flag, SQLite
-** assumes that the strategy may visit at most one row.
-**
-** Additionally, if xBestIndex sets the SQLITE_INDEX_SCAN_UNIQUE flag, then
-** SQLite also assumes that if a call to the xUpdate() method is made as
-** part of the same statement to delete or update a virtual table row and the
-** implementation returns SQLITE_CONSTRAINT, then there is no need to rollback
-** any database changes. In other words, if the xUpdate() returns
-** SQLITE_CONSTRAINT, the database contents must be exactly as they were
-** before xUpdate was called. By contrast, if SQLITE_INDEX_SCAN_UNIQUE is not
-** set and xUpdate returns SQLITE_CONSTRAINT, any database changes made by
-** the xUpdate method are automatically rolled back by SQLite.
-**
-** IMPORTANT: The estimatedRows field was added to the sqlite3_index_info
-** structure for SQLite version 3.8.2. If a virtual table extension is
-** used with an SQLite version earlier than 3.8.2, the results of attempting
-** to read or write the estimatedRows field are undefined (but are likely
-** to included crashing the application). The estimatedRows field should
-** therefore only be used if [sqlite3_libversion_number()] returns a
-** value greater than or equal to 3008002. Similarly, the idxFlags field
-** was added for version 3.9.0. It may therefore only be used if
-** sqlite3_libversion_number() returns a value greater than or equal to
-** 3009000.
-*/
-struct sqlite3_index_info {
- /* Inputs */
- int nConstraint; /* Number of entries in aConstraint */
- struct sqlite3_index_constraint {
- int iColumn; /* Column constrained. -1 for ROWID */
- unsigned char op; /* Constraint operator */
- unsigned char usable; /* True if this constraint is usable */
- int iTermOffset; /* Used internally - xBestIndex should ignore */
- } *aConstraint; /* Table of WHERE clause constraints */
- int nOrderBy; /* Number of terms in the ORDER BY clause */
- struct sqlite3_index_orderby {
- int iColumn; /* Column number */
- unsigned char desc; /* True for DESC. False for ASC. */
- } *aOrderBy; /* The ORDER BY clause */
- /* Outputs */
- struct sqlite3_index_constraint_usage {
- int argvIndex; /* if >0, constraint is part of argv to xFilter */
- unsigned char omit; /* Do not code a test for this constraint */
- } *aConstraintUsage;
- int idxNum; /* Number used to identify the index */
- char *idxStr; /* String, possibly obtained from sqlite3_malloc */
- int needToFreeIdxStr; /* Free idxStr using sqlite3_free() if true */
- int orderByConsumed; /* True if output is already ordered */
- double estimatedCost; /* Estimated cost of using this index */
- /* Fields below are only available in SQLite 3.8.2 and later */
- sqlite3_int64 estimatedRows; /* Estimated number of rows returned */
- /* Fields below are only available in SQLite 3.9.0 and later */
- int idxFlags; /* Mask of SQLITE_INDEX_SCAN_* flags */
- /* Fields below are only available in SQLite 3.10.0 and later */
- sqlite3_uint64 colUsed; /* Input: Mask of columns used by statement */
-};
-
-/*
-** CAPI3REF: Virtual Table Scan Flags
-*/
-#define SQLITE_INDEX_SCAN_UNIQUE 1 /* Scan visits at most 1 row */
-
-/*
-** CAPI3REF: Virtual Table Constraint Operator Codes
-**
-** These macros defined the allowed values for the
-** [sqlite3_index_info].aConstraint[].op field. Each value represents
-** an operator that is part of a constraint term in the wHERE clause of
-** a query that uses a [virtual table].
-*/
-#define SQLITE_INDEX_CONSTRAINT_EQ 2
-#define SQLITE_INDEX_CONSTRAINT_GT 4
-#define SQLITE_INDEX_CONSTRAINT_LE 8
-#define SQLITE_INDEX_CONSTRAINT_LT 16
-#define SQLITE_INDEX_CONSTRAINT_GE 32
-#define SQLITE_INDEX_CONSTRAINT_MATCH 64
-#define SQLITE_INDEX_CONSTRAINT_LIKE 65
-#define SQLITE_INDEX_CONSTRAINT_GLOB 66
-#define SQLITE_INDEX_CONSTRAINT_REGEXP 67
-
-/*
-** CAPI3REF: Register A Virtual Table Implementation
-** METHOD: sqlite3
-**
-** ^These routines are used to register a new [virtual table module] name.
-** ^Module names must be registered before
-** creating a new [virtual table] using the module and before using a
-** preexisting [virtual table] for the module.
-**
-** ^The module name is registered on the [database connection] specified
-** by the first parameter. ^The name of the module is given by the
-** second parameter. ^The third parameter is a pointer to
-** the implementation of the [virtual table module]. ^The fourth
-** parameter is an arbitrary client data pointer that is passed through
-** into the [xCreate] and [xConnect] methods of the virtual table module
-** when a new virtual table is be being created or reinitialized.
-**
-** ^The sqlite3_create_module_v2() interface has a fifth parameter which
-** is a pointer to a destructor for the pClientData. ^SQLite will
-** invoke the destructor function (if it is not NULL) when SQLite
-** no longer needs the pClientData pointer. ^The destructor will also
-** be invoked if the call to sqlite3_create_module_v2() fails.
-** ^The sqlite3_create_module()
-** interface is equivalent to sqlite3_create_module_v2() with a NULL
-** destructor.
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_create_module(
- sqlite3 *db, /* SQLite connection to register module with */
- const char *zName, /* Name of the module */
- const sqlite3_module *p, /* Methods for the module */
- void *pClientData /* Client data for xCreate/xConnect */
-);
-SQLITE_API int SQLITE_STDCALL sqlite3_create_module_v2(
- sqlite3 *db, /* SQLite connection to register module with */
- const char *zName, /* Name of the module */
- const sqlite3_module *p, /* Methods for the module */
- void *pClientData, /* Client data for xCreate/xConnect */
- void(*xDestroy)(void*) /* Module destructor function */
-);
-
-/*
-** CAPI3REF: Virtual Table Instance Object
-** KEYWORDS: sqlite3_vtab
-**
-** Every [virtual table module] implementation uses a subclass
-** of this object to describe a particular instance
-** of the [virtual table]. Each subclass will
-** be tailored to the specific needs of the module implementation.
-** The purpose of this superclass is to define certain fields that are
-** common to all module implementations.
-**
-** ^Virtual tables methods can set an error message by assigning a
-** string obtained from [sqlite3_mprintf()] to zErrMsg. The method should
-** take care that any prior string is freed by a call to [sqlite3_free()]
-** prior to assigning a new string to zErrMsg. ^After the error message
-** is delivered up to the client application, the string will be automatically
-** freed by sqlite3_free() and the zErrMsg field will be zeroed.
-*/
-struct sqlite3_vtab {
- const sqlite3_module *pModule; /* The module for this virtual table */
- int nRef; /* Number of open cursors */
- char *zErrMsg; /* Error message from sqlite3_mprintf() */
- /* Virtual table implementations will typically add additional fields */
-};
-
-/*
-** CAPI3REF: Virtual Table Cursor Object
-** KEYWORDS: sqlite3_vtab_cursor {virtual table cursor}
-**
-** Every [virtual table module] implementation uses a subclass of the
-** following structure to describe cursors that point into the
-** [virtual table] and are used
-** to loop through the virtual table. Cursors are created using the
-** [sqlite3_module.xOpen | xOpen] method of the module and are destroyed
-** by the [sqlite3_module.xClose | xClose] method. Cursors are used
-** by the [xFilter], [xNext], [xEof], [xColumn], and [xRowid] methods
-** of the module. Each module implementation will define
-** the content of a cursor structure to suit its own needs.
-**
-** This superclass exists in order to define fields of the cursor that
-** are common to all implementations.
-*/
-struct sqlite3_vtab_cursor {
- sqlite3_vtab *pVtab; /* Virtual table of this cursor */
- /* Virtual table implementations will typically add additional fields */
-};
-
-/*
-** CAPI3REF: Declare The Schema Of A Virtual Table
-**
-** ^The [xCreate] and [xConnect] methods of a
-** [virtual table module] call this interface
-** to declare the format (the names and datatypes of the columns) of
-** the virtual tables they implement.
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_declare_vtab(sqlite3*, const char *zSQL);
-
-/*
-** CAPI3REF: Overload A Function For A Virtual Table
-** METHOD: sqlite3
-**
-** ^(Virtual tables can provide alternative implementations of functions
-** using the [xFindFunction] method of the [virtual table module].
-** But global versions of those functions
-** must exist in order to be overloaded.)^
-**
-** ^(This API makes sure a global version of a function with a particular
-** name and number of parameters exists. If no such function exists
-** before this API is called, a new function is created.)^ ^The implementation
-** of the new function always causes an exception to be thrown. So
-** the new function is not good for anything by itself. Its only
-** purpose is to be a placeholder function that can be overloaded
-** by a [virtual table].
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_overload_function(sqlite3*, const char *zFuncName, int nArg);
-
-/*
-** The interface to the virtual-table mechanism defined above (back up
-** to a comment remarkably similar to this one) is currently considered
-** to be experimental. The interface might change in incompatible ways.
-** If this is a problem for you, do not use the interface at this time.
-**
-** When the virtual-table mechanism stabilizes, we will declare the
-** interface fixed, support it indefinitely, and remove this comment.
-*/
-
-/*
-** CAPI3REF: A Handle To An Open BLOB
-** KEYWORDS: {BLOB handle} {BLOB handles}
-**
-** An instance of this object represents an open BLOB on which
-** [sqlite3_blob_open | incremental BLOB I/O] can be performed.
-** ^Objects of this type are created by [sqlite3_blob_open()]
-** and destroyed by [sqlite3_blob_close()].
-** ^The [sqlite3_blob_read()] and [sqlite3_blob_write()] interfaces
-** can be used to read or write small subsections of the BLOB.
-** ^The [sqlite3_blob_bytes()] interface returns the size of the BLOB in bytes.
-*/
-typedef struct sqlite3_blob sqlite3_blob;
-
-/*
-** CAPI3REF: Open A BLOB For Incremental I/O
-** METHOD: sqlite3
-** CONSTRUCTOR: sqlite3_blob
-**
-** ^(This interfaces opens a [BLOB handle | handle] to the BLOB located
-** in row iRow, column zColumn, table zTable in database zDb;
-** in other words, the same BLOB that would be selected by:
-**
-**
-** SELECT zColumn FROM zDb.zTable WHERE [rowid] = iRow;
-** )^
-**
-** ^(Parameter zDb is not the filename that contains the database, but
-** rather the symbolic name of the database. For attached databases, this is
-** the name that appears after the AS keyword in the [ATTACH] statement.
-** For the main database file, the database name is "main". For TEMP
-** tables, the database name is "temp".)^
-**
-** ^If the flags parameter is non-zero, then the BLOB is opened for read
-** and write access. ^If the flags parameter is zero, the BLOB is opened for
-** read-only access.
-**
-** ^(On success, [SQLITE_OK] is returned and the new [BLOB handle] is stored
-** in *ppBlob. Otherwise an [error code] is returned and, unless the error
-** code is SQLITE_MISUSE, *ppBlob is set to NULL.)^ ^This means that, provided
-** the API is not misused, it is always safe to call [sqlite3_blob_close()]
-** on *ppBlob after this function it returns.
-**
-** This function fails with SQLITE_ERROR if any of the following are true:
-**
-** ^(Database zDb does not exist)^,
-** ^(Table zTable does not exist within database zDb)^,
-** ^(Table zTable is a WITHOUT ROWID table)^,
-** ^(Column zColumn does not exist)^,
-** ^(Row iRow is not present in the table)^,
-** ^(The specified column of row iRow contains a value that is not
-** a TEXT or BLOB value)^,
-** ^(Column zColumn is part of an index, PRIMARY KEY or UNIQUE
-** constraint and the blob is being opened for read/write access)^,
-** ^([foreign key constraints | Foreign key constraints] are enabled,
-** column zColumn is part of a [child key] definition and the blob is
-** being opened for read/write access)^.
-**
-**
-** ^Unless it returns SQLITE_MISUSE, this function sets the
-** [database connection] error code and message accessible via
-** [sqlite3_errcode()] and [sqlite3_errmsg()] and related functions.
-**
-**
-** ^(If the row that a BLOB handle points to is modified by an
-** [UPDATE], [DELETE], or by [ON CONFLICT] side-effects
-** then the BLOB handle is marked as "expired".
-** This is true if any column of the row is changed, even a column
-** other than the one the BLOB handle is open on.)^
-** ^Calls to [sqlite3_blob_read()] and [sqlite3_blob_write()] for
-** an expired BLOB handle fail with a return code of [SQLITE_ABORT].
-** ^(Changes written into a BLOB prior to the BLOB expiring are not
-** rolled back by the expiration of the BLOB. Such changes will eventually
-** commit if the transaction continues to completion.)^
-**
-** ^Use the [sqlite3_blob_bytes()] interface to determine the size of
-** the opened blob. ^The size of a blob may not be changed by this
-** interface. Use the [UPDATE] SQL command to change the size of a
-** blob.
-**
-** ^The [sqlite3_bind_zeroblob()] and [sqlite3_result_zeroblob()] interfaces
-** and the built-in [zeroblob] SQL function may be used to create a
-** zero-filled blob to read or write using the incremental-blob interface.
-**
-** To avoid a resource leak, every open [BLOB handle] should eventually
-** be released by a call to [sqlite3_blob_close()].
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_blob_open(
- sqlite3*,
- const char *zDb,
- const char *zTable,
- const char *zColumn,
- sqlite3_int64 iRow,
- int flags,
- sqlite3_blob **ppBlob
-);
-
-/*
-** CAPI3REF: Move a BLOB Handle to a New Row
-** METHOD: sqlite3_blob
-**
-** ^This function is used to move an existing blob handle so that it points
-** to a different row of the same database table. ^The new row is identified
-** by the rowid value passed as the second argument. Only the row can be
-** changed. ^The database, table and column on which the blob handle is open
-** remain the same. Moving an existing blob handle to a new row can be
-** faster than closing the existing handle and opening a new one.
-**
-** ^(The new row must meet the same criteria as for [sqlite3_blob_open()] -
-** it must exist and there must be either a blob or text value stored in
-** the nominated column.)^ ^If the new row is not present in the table, or if
-** it does not contain a blob or text value, or if another error occurs, an
-** SQLite error code is returned and the blob handle is considered aborted.
-** ^All subsequent calls to [sqlite3_blob_read()], [sqlite3_blob_write()] or
-** [sqlite3_blob_reopen()] on an aborted blob handle immediately return
-** SQLITE_ABORT. ^Calling [sqlite3_blob_bytes()] on an aborted blob handle
-** always returns zero.
-**
-** ^This function sets the database handle error code and message.
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_blob_reopen(sqlite3_blob *, sqlite3_int64);
-
-/*
-** CAPI3REF: Close A BLOB Handle
-** DESTRUCTOR: sqlite3_blob
-**
-** ^This function closes an open [BLOB handle]. ^(The BLOB handle is closed
-** unconditionally. Even if this routine returns an error code, the
-** handle is still closed.)^
-**
-** ^If the blob handle being closed was opened for read-write access, and if
-** the database is in auto-commit mode and there are no other open read-write
-** blob handles or active write statements, the current transaction is
-** committed. ^If an error occurs while committing the transaction, an error
-** code is returned and the transaction rolled back.
-**
-** Calling this function with an argument that is not a NULL pointer or an
-** open blob handle results in undefined behaviour. ^Calling this routine
-** with a null pointer (such as would be returned by a failed call to
-** [sqlite3_blob_open()]) is a harmless no-op. ^Otherwise, if this function
-** is passed a valid open blob handle, the values returned by the
-** sqlite3_errcode() and sqlite3_errmsg() functions are set before returning.
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_blob_close(sqlite3_blob *);
-
-/*
-** CAPI3REF: Return The Size Of An Open BLOB
-** METHOD: sqlite3_blob
-**
-** ^Returns the size in bytes of the BLOB accessible via the
-** successfully opened [BLOB handle] in its only argument. ^The
-** incremental blob I/O routines can only read or overwriting existing
-** blob content; they cannot change the size of a blob.
-**
-** This routine only works on a [BLOB handle] which has been created
-** by a prior successful call to [sqlite3_blob_open()] and which has not
-** been closed by [sqlite3_blob_close()]. Passing any other pointer in
-** to this routine results in undefined and probably undesirable behavior.
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_blob_bytes(sqlite3_blob *);
-
-/*
-** CAPI3REF: Read Data From A BLOB Incrementally
-** METHOD: sqlite3_blob
-**
-** ^(This function is used to read data from an open [BLOB handle] into a
-** caller-supplied buffer. N bytes of data are copied into buffer Z
-** from the open BLOB, starting at offset iOffset.)^
-**
-** ^If offset iOffset is less than N bytes from the end of the BLOB,
-** [SQLITE_ERROR] is returned and no data is read. ^If N or iOffset is
-** less than zero, [SQLITE_ERROR] is returned and no data is read.
-** ^The size of the blob (and hence the maximum value of N+iOffset)
-** can be determined using the [sqlite3_blob_bytes()] interface.
-**
-** ^An attempt to read from an expired [BLOB handle] fails with an
-** error code of [SQLITE_ABORT].
-**
-** ^(On success, sqlite3_blob_read() returns SQLITE_OK.
-** Otherwise, an [error code] or an [extended error code] is returned.)^
-**
-** This routine only works on a [BLOB handle] which has been created
-** by a prior successful call to [sqlite3_blob_open()] and which has not
-** been closed by [sqlite3_blob_close()]. Passing any other pointer in
-** to this routine results in undefined and probably undesirable behavior.
-**
-** See also: [sqlite3_blob_write()].
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_blob_read(sqlite3_blob *, void *Z, int N, int iOffset);
-
-/*
-** CAPI3REF: Write Data Into A BLOB Incrementally
-** METHOD: sqlite3_blob
-**
-** ^(This function is used to write data into an open [BLOB handle] from a
-** caller-supplied buffer. N bytes of data are copied from the buffer Z
-** into the open BLOB, starting at offset iOffset.)^
-**
-** ^(On success, sqlite3_blob_write() returns SQLITE_OK.
-** Otherwise, an [error code] or an [extended error code] is returned.)^
-** ^Unless SQLITE_MISUSE is returned, this function sets the
-** [database connection] error code and message accessible via
-** [sqlite3_errcode()] and [sqlite3_errmsg()] and related functions.
-**
-** ^If the [BLOB handle] passed as the first argument was not opened for
-** writing (the flags parameter to [sqlite3_blob_open()] was zero),
-** this function returns [SQLITE_READONLY].
-**
-** This function may only modify the contents of the BLOB; it is
-** not possible to increase the size of a BLOB using this API.
-** ^If offset iOffset is less than N bytes from the end of the BLOB,
-** [SQLITE_ERROR] is returned and no data is written. The size of the
-** BLOB (and hence the maximum value of N+iOffset) can be determined
-** using the [sqlite3_blob_bytes()] interface. ^If N or iOffset are less
-** than zero [SQLITE_ERROR] is returned and no data is written.
-**
-** ^An attempt to write to an expired [BLOB handle] fails with an
-** error code of [SQLITE_ABORT]. ^Writes to the BLOB that occurred
-** before the [BLOB handle] expired are not rolled back by the
-** expiration of the handle, though of course those changes might
-** have been overwritten by the statement that expired the BLOB handle
-** or by other independent statements.
-**
-** This routine only works on a [BLOB handle] which has been created
-** by a prior successful call to [sqlite3_blob_open()] and which has not
-** been closed by [sqlite3_blob_close()]. Passing any other pointer in
-** to this routine results in undefined and probably undesirable behavior.
-**
-** See also: [sqlite3_blob_read()].
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_blob_write(sqlite3_blob *, const void *z, int n, int iOffset);
-
-/*
-** CAPI3REF: Virtual File System Objects
-**
-** A virtual filesystem (VFS) is an [sqlite3_vfs] object
-** that SQLite uses to interact
-** with the underlying operating system. Most SQLite builds come with a
-** single default VFS that is appropriate for the host computer.
-** New VFSes can be registered and existing VFSes can be unregistered.
-** The following interfaces are provided.
-**
-** ^The sqlite3_vfs_find() interface returns a pointer to a VFS given its name.
-** ^Names are case sensitive.
-** ^Names are zero-terminated UTF-8 strings.
-** ^If there is no match, a NULL pointer is returned.
-** ^If zVfsName is NULL then the default VFS is returned.
-**
-** ^New VFSes are registered with sqlite3_vfs_register().
-** ^Each new VFS becomes the default VFS if the makeDflt flag is set.
-** ^The same VFS can be registered multiple times without injury.
-** ^To make an existing VFS into the default VFS, register it again
-** with the makeDflt flag set. If two different VFSes with the
-** same name are registered, the behavior is undefined. If a
-** VFS is registered with a name that is NULL or an empty string,
-** then the behavior is undefined.
-**
-** ^Unregister a VFS with the sqlite3_vfs_unregister() interface.
-** ^(If the default VFS is unregistered, another VFS is chosen as
-** the default. The choice for the new VFS is arbitrary.)^
-*/
-SQLITE_API sqlite3_vfs *SQLITE_STDCALL sqlite3_vfs_find(const char *zVfsName);
-SQLITE_API int SQLITE_STDCALL sqlite3_vfs_register(sqlite3_vfs*, int makeDflt);
-SQLITE_API int SQLITE_STDCALL sqlite3_vfs_unregister(sqlite3_vfs*);
-
-/*
-** CAPI3REF: Mutexes
-**
-** The SQLite core uses these routines for thread
-** synchronization. Though they are intended for internal
-** use by SQLite, code that links against SQLite is
-** permitted to use any of these routines.
-**
-** The SQLite source code contains multiple implementations
-** of these mutex routines. An appropriate implementation
-** is selected automatically at compile-time. The following
-** implementations are available in the SQLite core:
-**
-**
-** SQLITE_MUTEX_PTHREADS
-** SQLITE_MUTEX_W32
-** SQLITE_MUTEX_NOOP
-**
-**
-** The SQLITE_MUTEX_NOOP implementation is a set of routines
-** that does no real locking and is appropriate for use in
-** a single-threaded application. The SQLITE_MUTEX_PTHREADS and
-** SQLITE_MUTEX_W32 implementations are appropriate for use on Unix
-** and Windows.
-**
-** If SQLite is compiled with the SQLITE_MUTEX_APPDEF preprocessor
-** macro defined (with "-DSQLITE_MUTEX_APPDEF=1"), then no mutex
-** implementation is included with the library. In this case the
-** application must supply a custom mutex implementation using the
-** [SQLITE_CONFIG_MUTEX] option of the sqlite3_config() function
-** before calling sqlite3_initialize() or any other public sqlite3_
-** function that calls sqlite3_initialize().
-**
-** ^The sqlite3_mutex_alloc() routine allocates a new
-** mutex and returns a pointer to it. ^The sqlite3_mutex_alloc()
-** routine returns NULL if it is unable to allocate the requested
-** mutex. The argument to sqlite3_mutex_alloc() must one of these
-** integer constants:
-**
-**
-** SQLITE_MUTEX_FAST
-** SQLITE_MUTEX_RECURSIVE
-** SQLITE_MUTEX_STATIC_MASTER
-** SQLITE_MUTEX_STATIC_MEM
-** SQLITE_MUTEX_STATIC_OPEN
-** SQLITE_MUTEX_STATIC_PRNG
-** SQLITE_MUTEX_STATIC_LRU
-** SQLITE_MUTEX_STATIC_PMEM
-** SQLITE_MUTEX_STATIC_APP1
-** SQLITE_MUTEX_STATIC_APP2
-** SQLITE_MUTEX_STATIC_APP3
-** SQLITE_MUTEX_STATIC_VFS1
-** SQLITE_MUTEX_STATIC_VFS2
-** SQLITE_MUTEX_STATIC_VFS3
-**
-**
-** ^The first two constants (SQLITE_MUTEX_FAST and SQLITE_MUTEX_RECURSIVE)
-** cause sqlite3_mutex_alloc() to create
-** a new mutex. ^The new mutex is recursive when SQLITE_MUTEX_RECURSIVE
-** is used but not necessarily so when SQLITE_MUTEX_FAST is used.
-** The mutex implementation does not need to make a distinction
-** between SQLITE_MUTEX_RECURSIVE and SQLITE_MUTEX_FAST if it does
-** not want to. SQLite will only request a recursive mutex in
-** cases where it really needs one. If a faster non-recursive mutex
-** implementation is available on the host platform, the mutex subsystem
-** might return such a mutex in response to SQLITE_MUTEX_FAST.
-**
-** ^The other allowed parameters to sqlite3_mutex_alloc() (anything other
-** than SQLITE_MUTEX_FAST and SQLITE_MUTEX_RECURSIVE) each return
-** a pointer to a static preexisting mutex. ^Nine static mutexes are
-** used by the current version of SQLite. Future versions of SQLite
-** may add additional static mutexes. Static mutexes are for internal
-** use by SQLite only. Applications that use SQLite mutexes should
-** use only the dynamic mutexes returned by SQLITE_MUTEX_FAST or
-** SQLITE_MUTEX_RECURSIVE.
-**
-** ^Note that if one of the dynamic mutex parameters (SQLITE_MUTEX_FAST
-** or SQLITE_MUTEX_RECURSIVE) is used then sqlite3_mutex_alloc()
-** returns a different mutex on every call. ^For the static
-** mutex types, the same mutex is returned on every call that has
-** the same type number.
-**
-** ^The sqlite3_mutex_free() routine deallocates a previously
-** allocated dynamic mutex. Attempting to deallocate a static
-** mutex results in undefined behavior.
-**
-** ^The sqlite3_mutex_enter() and sqlite3_mutex_try() routines attempt
-** to enter a mutex. ^If another thread is already within the mutex,
-** sqlite3_mutex_enter() will block and sqlite3_mutex_try() will return
-** SQLITE_BUSY. ^The sqlite3_mutex_try() interface returns [SQLITE_OK]
-** upon successful entry. ^(Mutexes created using
-** SQLITE_MUTEX_RECURSIVE can be entered multiple times by the same thread.
-** In such cases, the
-** mutex must be exited an equal number of times before another thread
-** can enter.)^ If the same thread tries to enter any mutex other
-** than an SQLITE_MUTEX_RECURSIVE more than once, the behavior is undefined.
-**
-** ^(Some systems (for example, Windows 95) do not support the operation
-** implemented by sqlite3_mutex_try(). On those systems, sqlite3_mutex_try()
-** will always return SQLITE_BUSY. The SQLite core only ever uses
-** sqlite3_mutex_try() as an optimization so this is acceptable
-** behavior.)^
-**
-** ^The sqlite3_mutex_leave() routine exits a mutex that was
-** previously entered by the same thread. The behavior
-** is undefined if the mutex is not currently entered by the
-** calling thread or is not currently allocated.
-**
-** ^If the argument to sqlite3_mutex_enter(), sqlite3_mutex_try(), or
-** sqlite3_mutex_leave() is a NULL pointer, then all three routines
-** behave as no-ops.
-**
-** See also: [sqlite3_mutex_held()] and [sqlite3_mutex_notheld()].
-*/
-SQLITE_API sqlite3_mutex *SQLITE_STDCALL sqlite3_mutex_alloc(int);
-SQLITE_API void SQLITE_STDCALL sqlite3_mutex_free(sqlite3_mutex*);
-SQLITE_API void SQLITE_STDCALL sqlite3_mutex_enter(sqlite3_mutex*);
-SQLITE_API int SQLITE_STDCALL sqlite3_mutex_try(sqlite3_mutex*);
-SQLITE_API void SQLITE_STDCALL sqlite3_mutex_leave(sqlite3_mutex*);
-
-/*
-** CAPI3REF: Mutex Methods Object
-**
-** An instance of this structure defines the low-level routines
-** used to allocate and use mutexes.
-**
-** Usually, the default mutex implementations provided by SQLite are
-** sufficient, however the application has the option of substituting a custom
-** implementation for specialized deployments or systems for which SQLite
-** does not provide a suitable implementation. In this case, the application
-** creates and populates an instance of this structure to pass
-** to sqlite3_config() along with the [SQLITE_CONFIG_MUTEX] option.
-** Additionally, an instance of this structure can be used as an
-** output variable when querying the system for the current mutex
-** implementation, using the [SQLITE_CONFIG_GETMUTEX] option.
-**
-** ^The xMutexInit method defined by this structure is invoked as
-** part of system initialization by the sqlite3_initialize() function.
-** ^The xMutexInit routine is called by SQLite exactly once for each
-** effective call to [sqlite3_initialize()].
-**
-** ^The xMutexEnd method defined by this structure is invoked as
-** part of system shutdown by the sqlite3_shutdown() function. The
-** implementation of this method is expected to release all outstanding
-** resources obtained by the mutex methods implementation, especially
-** those obtained by the xMutexInit method. ^The xMutexEnd()
-** interface is invoked exactly once for each call to [sqlite3_shutdown()].
-**
-** ^(The remaining seven methods defined by this structure (xMutexAlloc,
-** xMutexFree, xMutexEnter, xMutexTry, xMutexLeave, xMutexHeld and
-** xMutexNotheld) implement the following interfaces (respectively):
-**
-**
-** [sqlite3_mutex_alloc()]
-** [sqlite3_mutex_free()]
-** [sqlite3_mutex_enter()]
-** [sqlite3_mutex_try()]
-** [sqlite3_mutex_leave()]
-** [sqlite3_mutex_held()]
-** [sqlite3_mutex_notheld()]
-** )^
-**
-** The only difference is that the public sqlite3_XXX functions enumerated
-** above silently ignore any invocations that pass a NULL pointer instead
-** of a valid mutex handle. The implementations of the methods defined
-** by this structure are not required to handle this case, the results
-** of passing a NULL pointer instead of a valid mutex handle are undefined
-** (i.e. it is acceptable to provide an implementation that segfaults if
-** it is passed a NULL pointer).
-**
-** The xMutexInit() method must be threadsafe. It must be harmless to
-** invoke xMutexInit() multiple times within the same process and without
-** intervening calls to xMutexEnd(). Second and subsequent calls to
-** xMutexInit() must be no-ops.
-**
-** xMutexInit() must not use SQLite memory allocation ([sqlite3_malloc()]
-** and its associates). Similarly, xMutexAlloc() must not use SQLite memory
-** allocation for a static mutex. ^However xMutexAlloc() may use SQLite
-** memory allocation for a fast or recursive mutex.
-**
-** ^SQLite will invoke the xMutexEnd() method when [sqlite3_shutdown()] is
-** called, but only if the prior call to xMutexInit returned SQLITE_OK.
-** If xMutexInit fails in any way, it is expected to clean up after itself
-** prior to returning.
-*/
-typedef struct sqlite3_mutex_methods sqlite3_mutex_methods;
-struct sqlite3_mutex_methods {
- int (*xMutexInit)(void);
- int (*xMutexEnd)(void);
- sqlite3_mutex *(*xMutexAlloc)(int);
- void (*xMutexFree)(sqlite3_mutex *);
- void (*xMutexEnter)(sqlite3_mutex *);
- int (*xMutexTry)(sqlite3_mutex *);
- void (*xMutexLeave)(sqlite3_mutex *);
- int (*xMutexHeld)(sqlite3_mutex *);
- int (*xMutexNotheld)(sqlite3_mutex *);
-};
-
-/*
-** CAPI3REF: Mutex Verification Routines
-**
-** The sqlite3_mutex_held() and sqlite3_mutex_notheld() routines
-** are intended for use inside assert() statements. The SQLite core
-** never uses these routines except inside an assert() and applications
-** are advised to follow the lead of the core. The SQLite core only
-** provides implementations for these routines when it is compiled
-** with the SQLITE_DEBUG flag. External mutex implementations
-** are only required to provide these routines if SQLITE_DEBUG is
-** defined and if NDEBUG is not defined.
-**
-** These routines should return true if the mutex in their argument
-** is held or not held, respectively, by the calling thread.
-**
-** The implementation is not required to provide versions of these
-** routines that actually work. If the implementation does not provide working
-** versions of these routines, it should at least provide stubs that always
-** return true so that one does not get spurious assertion failures.
-**
-** If the argument to sqlite3_mutex_held() is a NULL pointer then
-** the routine should return 1. This seems counter-intuitive since
-** clearly the mutex cannot be held if it does not exist. But
-** the reason the mutex does not exist is because the build is not
-** using mutexes. And we do not want the assert() containing the
-** call to sqlite3_mutex_held() to fail, so a non-zero return is
-** the appropriate thing to do. The sqlite3_mutex_notheld()
-** interface should also return 1 when given a NULL pointer.
-*/
-#ifndef NDEBUG
-SQLITE_API int SQLITE_STDCALL sqlite3_mutex_held(sqlite3_mutex*);
-SQLITE_API int SQLITE_STDCALL sqlite3_mutex_notheld(sqlite3_mutex*);
-#endif
-
-/*
-** CAPI3REF: Mutex Types
-**
-** The [sqlite3_mutex_alloc()] interface takes a single argument
-** which is one of these integer constants.
-**
-** The set of static mutexes may change from one SQLite release to the
-** next. Applications that override the built-in mutex logic must be
-** prepared to accommodate additional static mutexes.
-*/
-#define SQLITE_MUTEX_FAST 0
-#define SQLITE_MUTEX_RECURSIVE 1
-#define SQLITE_MUTEX_STATIC_MASTER 2
-#define SQLITE_MUTEX_STATIC_MEM 3 /* sqlite3_malloc() */
-#define SQLITE_MUTEX_STATIC_MEM2 4 /* NOT USED */
-#define SQLITE_MUTEX_STATIC_OPEN 4 /* sqlite3BtreeOpen() */
-#define SQLITE_MUTEX_STATIC_PRNG 5 /* sqlite3_random() */
-#define SQLITE_MUTEX_STATIC_LRU 6 /* lru page list */
-#define SQLITE_MUTEX_STATIC_LRU2 7 /* NOT USED */
-#define SQLITE_MUTEX_STATIC_PMEM 7 /* sqlite3PageMalloc() */
-#define SQLITE_MUTEX_STATIC_APP1 8 /* For use by application */
-#define SQLITE_MUTEX_STATIC_APP2 9 /* For use by application */
-#define SQLITE_MUTEX_STATIC_APP3 10 /* For use by application */
-#define SQLITE_MUTEX_STATIC_VFS1 11 /* For use by built-in VFS */
-#define SQLITE_MUTEX_STATIC_VFS2 12 /* For use by extension VFS */
-#define SQLITE_MUTEX_STATIC_VFS3 13 /* For use by application VFS */
-
-/*
-** CAPI3REF: Retrieve the mutex for a database connection
-** METHOD: sqlite3
-**
-** ^This interface returns a pointer the [sqlite3_mutex] object that
-** serializes access to the [database connection] given in the argument
-** when the [threading mode] is Serialized.
-** ^If the [threading mode] is Single-thread or Multi-thread then this
-** routine returns a NULL pointer.
-*/
-SQLITE_API sqlite3_mutex *SQLITE_STDCALL sqlite3_db_mutex(sqlite3*);
-
-/*
-** CAPI3REF: Low-Level Control Of Database Files
-** METHOD: sqlite3
-**
-** ^The [sqlite3_file_control()] interface makes a direct call to the
-** xFileControl method for the [sqlite3_io_methods] object associated
-** with a particular database identified by the second argument. ^The
-** name of the database is "main" for the main database or "temp" for the
-** TEMP database, or the name that appears after the AS keyword for
-** databases that are added using the [ATTACH] SQL command.
-** ^A NULL pointer can be used in place of "main" to refer to the
-** main database file.
-** ^The third and fourth parameters to this routine
-** are passed directly through to the second and third parameters of
-** the xFileControl method. ^The return value of the xFileControl
-** method becomes the return value of this routine.
-**
-** ^The SQLITE_FCNTL_FILE_POINTER value for the op parameter causes
-** a pointer to the underlying [sqlite3_file] object to be written into
-** the space pointed to by the 4th parameter. ^The SQLITE_FCNTL_FILE_POINTER
-** case is a short-circuit path which does not actually invoke the
-** underlying sqlite3_io_methods.xFileControl method.
-**
-** ^If the second parameter (zDbName) does not match the name of any
-** open database file, then SQLITE_ERROR is returned. ^This error
-** code is not remembered and will not be recalled by [sqlite3_errcode()]
-** or [sqlite3_errmsg()]. The underlying xFileControl method might
-** also return SQLITE_ERROR. There is no way to distinguish between
-** an incorrect zDbName and an SQLITE_ERROR return from the underlying
-** xFileControl method.
-**
-** See also: [SQLITE_FCNTL_LOCKSTATE]
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_file_control(sqlite3*, const char *zDbName, int op, void*);
-
-/*
-** CAPI3REF: Testing Interface
-**
-** ^The sqlite3_test_control() interface is used to read out internal
-** state of SQLite and to inject faults into SQLite for testing
-** purposes. ^The first parameter is an operation code that determines
-** the number, meaning, and operation of all subsequent parameters.
-**
-** This interface is not for use by applications. It exists solely
-** for verifying the correct operation of the SQLite library. Depending
-** on how the SQLite library is compiled, this interface might not exist.
-**
-** The details of the operation codes, their meanings, the parameters
-** they take, and what they do are all subject to change without notice.
-** Unlike most of the SQLite API, this function is not guaranteed to
-** operate consistently from one release to the next.
-*/
-SQLITE_API int SQLITE_CDECL sqlite3_test_control(int op, ...);
-
-/*
-** CAPI3REF: Testing Interface Operation Codes
-**
-** These constants are the valid operation code parameters used
-** as the first argument to [sqlite3_test_control()].
-**
-** These parameters and their meanings are subject to change
-** without notice. These values are for testing purposes only.
-** Applications should not use any of these parameters or the
-** [sqlite3_test_control()] interface.
-*/
-#define SQLITE_TESTCTRL_FIRST 5
-#define SQLITE_TESTCTRL_PRNG_SAVE 5
-#define SQLITE_TESTCTRL_PRNG_RESTORE 6
-#define SQLITE_TESTCTRL_PRNG_RESET 7
-#define SQLITE_TESTCTRL_BITVEC_TEST 8
-#define SQLITE_TESTCTRL_FAULT_INSTALL 9
-#define SQLITE_TESTCTRL_BENIGN_MALLOC_HOOKS 10
-#define SQLITE_TESTCTRL_PENDING_BYTE 11
-#define SQLITE_TESTCTRL_ASSERT 12
-#define SQLITE_TESTCTRL_ALWAYS 13
-#define SQLITE_TESTCTRL_RESERVE 14
-#define SQLITE_TESTCTRL_OPTIMIZATIONS 15
-#define SQLITE_TESTCTRL_ISKEYWORD 16
-#define SQLITE_TESTCTRL_SCRATCHMALLOC 17
-#define SQLITE_TESTCTRL_LOCALTIME_FAULT 18
-#define SQLITE_TESTCTRL_EXPLAIN_STMT 19 /* NOT USED */
-#define SQLITE_TESTCTRL_NEVER_CORRUPT 20
-#define SQLITE_TESTCTRL_VDBE_COVERAGE 21
-#define SQLITE_TESTCTRL_BYTEORDER 22
-#define SQLITE_TESTCTRL_ISINIT 23
-#define SQLITE_TESTCTRL_SORTER_MMAP 24
-#define SQLITE_TESTCTRL_IMPOSTER 25
-#define SQLITE_TESTCTRL_LAST 25
-
-/*
-** CAPI3REF: SQLite Runtime Status
-**
-** ^These interfaces are used to retrieve runtime status information
-** about the performance of SQLite, and optionally to reset various
-** highwater marks. ^The first argument is an integer code for
-** the specific parameter to measure. ^(Recognized integer codes
-** are of the form [status parameters | SQLITE_STATUS_...].)^
-** ^The current value of the parameter is returned into *pCurrent.
-** ^The highest recorded value is returned in *pHighwater. ^If the
-** resetFlag is true, then the highest record value is reset after
-** *pHighwater is written. ^(Some parameters do not record the highest
-** value. For those parameters
-** nothing is written into *pHighwater and the resetFlag is ignored.)^
-** ^(Other parameters record only the highwater mark and not the current
-** value. For these latter parameters nothing is written into *pCurrent.)^
-**
-** ^The sqlite3_status() and sqlite3_status64() routines return
-** SQLITE_OK on success and a non-zero [error code] on failure.
-**
-** If either the current value or the highwater mark is too large to
-** be represented by a 32-bit integer, then the values returned by
-** sqlite3_status() are undefined.
-**
-** See also: [sqlite3_db_status()]
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_status(int op, int *pCurrent, int *pHighwater, int resetFlag);
-SQLITE_API int SQLITE_STDCALL sqlite3_status64(
- int op,
- sqlite3_int64 *pCurrent,
- sqlite3_int64 *pHighwater,
- int resetFlag
-);
-
-
-/*
-** CAPI3REF: Status Parameters
-** KEYWORDS: {status parameters}
-**
-** These integer constants designate various run-time status parameters
-** that can be returned by [sqlite3_status()].
-**
-**
-** [[SQLITE_STATUS_MEMORY_USED]] ^(SQLITE_STATUS_MEMORY_USED
-** This parameter is the current amount of memory checked out
-** using [sqlite3_malloc()], either directly or indirectly. The
-** figure includes calls made to [sqlite3_malloc()] by the application
-** and internal memory usage by the SQLite library. Scratch memory
-** controlled by [SQLITE_CONFIG_SCRATCH] and auxiliary page-cache
-** memory controlled by [SQLITE_CONFIG_PAGECACHE] is not included in
-** this parameter. The amount returned is the sum of the allocation
-** sizes as reported by the xSize method in [sqlite3_mem_methods]. )^
-**
-** [[SQLITE_STATUS_MALLOC_SIZE]] ^(SQLITE_STATUS_MALLOC_SIZE
-** This parameter records the largest memory allocation request
-** handed to [sqlite3_malloc()] or [sqlite3_realloc()] (or their
-** internal equivalents). Only the value returned in the
-** *pHighwater parameter to [sqlite3_status()] is of interest.
-** The value written into the *pCurrent parameter is undefined. )^
-**
-** [[SQLITE_STATUS_MALLOC_COUNT]] ^(SQLITE_STATUS_MALLOC_COUNT
-** This parameter records the number of separate memory allocations
-** currently checked out. )^
-**
-** [[SQLITE_STATUS_PAGECACHE_USED]] ^(SQLITE_STATUS_PAGECACHE_USED
-** This parameter returns the number of pages used out of the
-** [pagecache memory allocator] that was configured using
-** [SQLITE_CONFIG_PAGECACHE]. The
-** value returned is in pages, not in bytes. )^
-**
-** [[SQLITE_STATUS_PAGECACHE_OVERFLOW]]
-** ^(SQLITE_STATUS_PAGECACHE_OVERFLOW
-** This parameter returns the number of bytes of page cache
-** allocation which could not be satisfied by the [SQLITE_CONFIG_PAGECACHE]
-** buffer and where forced to overflow to [sqlite3_malloc()]. The
-** returned value includes allocations that overflowed because they
-** where too large (they were larger than the "sz" parameter to
-** [SQLITE_CONFIG_PAGECACHE]) and allocations that overflowed because
-** no space was left in the page cache. )^
-**
-** [[SQLITE_STATUS_PAGECACHE_SIZE]] ^(SQLITE_STATUS_PAGECACHE_SIZE
-** This parameter records the largest memory allocation request
-** handed to [pagecache memory allocator]. Only the value returned in the
-** *pHighwater parameter to [sqlite3_status()] is of interest.
-** The value written into the *pCurrent parameter is undefined. )^
-**
-** [[SQLITE_STATUS_SCRATCH_USED]] ^(SQLITE_STATUS_SCRATCH_USED
-** This parameter returns the number of allocations used out of the
-** [scratch memory allocator] configured using
-** [SQLITE_CONFIG_SCRATCH]. The value returned is in allocations, not
-** in bytes. Since a single thread may only have one scratch allocation
-** outstanding at time, this parameter also reports the number of threads
-** using scratch memory at the same time. )^
-**
-** [[SQLITE_STATUS_SCRATCH_OVERFLOW]] ^(SQLITE_STATUS_SCRATCH_OVERFLOW
-** This parameter returns the number of bytes of scratch memory
-** allocation which could not be satisfied by the [SQLITE_CONFIG_SCRATCH]
-** buffer and where forced to overflow to [sqlite3_malloc()]. The values
-** returned include overflows because the requested allocation was too
-** larger (that is, because the requested allocation was larger than the
-** "sz" parameter to [SQLITE_CONFIG_SCRATCH]) and because no scratch buffer
-** slots were available.
-** )^
-**
-** [[SQLITE_STATUS_SCRATCH_SIZE]] ^(SQLITE_STATUS_SCRATCH_SIZE
-** This parameter records the largest memory allocation request
-** handed to [scratch memory allocator]. Only the value returned in the
-** *pHighwater parameter to [sqlite3_status()] is of interest.
-** The value written into the *pCurrent parameter is undefined. )^
-**
-** [[SQLITE_STATUS_PARSER_STACK]] ^(SQLITE_STATUS_PARSER_STACK
-** The *pHighwater parameter records the deepest parser stack.
-** The *pCurrent value is undefined. The *pHighwater value is only
-** meaningful if SQLite is compiled with [YYTRACKMAXSTACKDEPTH]. )^
-**
-**
-** New status parameters may be added from time to time.
-*/
-#define SQLITE_STATUS_MEMORY_USED 0
-#define SQLITE_STATUS_PAGECACHE_USED 1
-#define SQLITE_STATUS_PAGECACHE_OVERFLOW 2
-#define SQLITE_STATUS_SCRATCH_USED 3
-#define SQLITE_STATUS_SCRATCH_OVERFLOW 4
-#define SQLITE_STATUS_MALLOC_SIZE 5
-#define SQLITE_STATUS_PARSER_STACK 6
-#define SQLITE_STATUS_PAGECACHE_SIZE 7
-#define SQLITE_STATUS_SCRATCH_SIZE 8
-#define SQLITE_STATUS_MALLOC_COUNT 9
-
-/*
-** CAPI3REF: Database Connection Status
-** METHOD: sqlite3
-**
-** ^This interface is used to retrieve runtime status information
-** about a single [database connection]. ^The first argument is the
-** database connection object to be interrogated. ^The second argument
-** is an integer constant, taken from the set of
-** [SQLITE_DBSTATUS options], that
-** determines the parameter to interrogate. The set of
-** [SQLITE_DBSTATUS options] is likely
-** to grow in future releases of SQLite.
-**
-** ^The current value of the requested parameter is written into *pCur
-** and the highest instantaneous value is written into *pHiwtr. ^If
-** the resetFlg is true, then the highest instantaneous value is
-** reset back down to the current value.
-**
-** ^The sqlite3_db_status() routine returns SQLITE_OK on success and a
-** non-zero [error code] on failure.
-**
-** See also: [sqlite3_status()] and [sqlite3_stmt_status()].
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_db_status(sqlite3*, int op, int *pCur, int *pHiwtr, int resetFlg);
-
-/*
-** CAPI3REF: Status Parameters for database connections
-** KEYWORDS: {SQLITE_DBSTATUS options}
-**
-** These constants are the available integer "verbs" that can be passed as
-** the second argument to the [sqlite3_db_status()] interface.
-**
-** New verbs may be added in future releases of SQLite. Existing verbs
-** might be discontinued. Applications should check the return code from
-** [sqlite3_db_status()] to make sure that the call worked.
-** The [sqlite3_db_status()] interface will return a non-zero error code
-** if a discontinued or unsupported verb is invoked.
-**
-**
-** [[SQLITE_DBSTATUS_LOOKASIDE_USED]] ^(SQLITE_DBSTATUS_LOOKASIDE_USED
-** This parameter returns the number of lookaside memory slots currently
-** checked out. )^
-**
-** [[SQLITE_DBSTATUS_LOOKASIDE_HIT]] ^(SQLITE_DBSTATUS_LOOKASIDE_HIT
-** This parameter returns the number malloc attempts that were
-** satisfied using lookaside memory. Only the high-water value is meaningful;
-** the current value is always zero.)^
-**
-** [[SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE]]
-** ^( SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE
-** This parameter returns the number malloc attempts that might have
-** been satisfied using lookaside memory but failed due to the amount of
-** memory requested being larger than the lookaside slot size.
-** Only the high-water value is meaningful;
-** the current value is always zero.)^
-**
-** [[SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL]]
-** ^( SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL
-** This parameter returns the number malloc attempts that might have
-** been satisfied using lookaside memory but failed due to all lookaside
-** memory already being in use.
-** Only the high-water value is meaningful;
-** the current value is always zero.)^
-**
-** [[SQLITE_DBSTATUS_CACHE_USED]] ^( SQLITE_DBSTATUS_CACHE_USED
-** This parameter returns the approximate number of bytes of heap
-** memory used by all pager caches associated with the database connection.)^
-** ^The highwater mark associated with SQLITE_DBSTATUS_CACHE_USED is always 0.
-**
-** [[SQLITE_DBSTATUS_CACHE_USED_SHARED]]
-** ^( SQLITE_DBSTATUS_CACHE_USED_SHARED
-** This parameter is similar to DBSTATUS_CACHE_USED, except that if a
-** pager cache is shared between two or more connections the bytes of heap
-** memory used by that pager cache is divided evenly between the attached
-** connections.)^ In other words, if none of the pager caches associated
-** with the database connection are shared, this request returns the same
-** value as DBSTATUS_CACHE_USED. Or, if one or more or the pager caches are
-** shared, the value returned by this call will be smaller than that returned
-** by DBSTATUS_CACHE_USED. ^The highwater mark associated with
-** SQLITE_DBSTATUS_CACHE_USED_SHARED is always 0.
-**
-** [[SQLITE_DBSTATUS_SCHEMA_USED]] ^( SQLITE_DBSTATUS_SCHEMA_USED
-** This parameter returns the approximate number of bytes of heap
-** memory used to store the schema for all databases associated
-** with the connection - main, temp, and any [ATTACH]-ed databases.)^
-** ^The full amount of memory used by the schemas is reported, even if the
-** schema memory is shared with other database connections due to
-** [shared cache mode] being enabled.
-** ^The highwater mark associated with SQLITE_DBSTATUS_SCHEMA_USED is always 0.
-**
-** [[SQLITE_DBSTATUS_STMT_USED]] ^( SQLITE_DBSTATUS_STMT_USED
-** This parameter returns the approximate number of bytes of heap
-** and lookaside memory used by all prepared statements associated with
-** the database connection.)^
-** ^The highwater mark associated with SQLITE_DBSTATUS_STMT_USED is always 0.
-**
-**
-** [[SQLITE_DBSTATUS_CACHE_HIT]] ^(SQLITE_DBSTATUS_CACHE_HIT
-** This parameter returns the number of pager cache hits that have
-** occurred.)^ ^The highwater mark associated with SQLITE_DBSTATUS_CACHE_HIT
-** is always 0.
-**
-**
-** [[SQLITE_DBSTATUS_CACHE_MISS]] ^(SQLITE_DBSTATUS_CACHE_MISS
-** This parameter returns the number of pager cache misses that have
-** occurred.)^ ^The highwater mark associated with SQLITE_DBSTATUS_CACHE_MISS
-** is always 0.
-**
-**
-** [[SQLITE_DBSTATUS_CACHE_WRITE]] ^(SQLITE_DBSTATUS_CACHE_WRITE
-** This parameter returns the number of dirty cache entries that have
-** been written to disk. Specifically, the number of pages written to the
-** wal file in wal mode databases, or the number of pages written to the
-** database file in rollback mode databases. Any pages written as part of
-** transaction rollback or database recovery operations are not included.
-** If an IO or other error occurs while writing a page to disk, the effect
-** on subsequent SQLITE_DBSTATUS_CACHE_WRITE requests is undefined.)^ ^The
-** highwater mark associated with SQLITE_DBSTATUS_CACHE_WRITE is always 0.
-**
-**
-** [[SQLITE_DBSTATUS_DEFERRED_FKS]] ^(SQLITE_DBSTATUS_DEFERRED_FKS
-** This parameter returns zero for the current value if and only if
-** all foreign key constraints (deferred or immediate) have been
-** resolved.)^ ^The highwater mark is always 0.
-**
-**
-*/
-#define SQLITE_DBSTATUS_LOOKASIDE_USED 0
-#define SQLITE_DBSTATUS_CACHE_USED 1
-#define SQLITE_DBSTATUS_SCHEMA_USED 2
-#define SQLITE_DBSTATUS_STMT_USED 3
-#define SQLITE_DBSTATUS_LOOKASIDE_HIT 4
-#define SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE 5
-#define SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL 6
-#define SQLITE_DBSTATUS_CACHE_HIT 7
-#define SQLITE_DBSTATUS_CACHE_MISS 8
-#define SQLITE_DBSTATUS_CACHE_WRITE 9
-#define SQLITE_DBSTATUS_DEFERRED_FKS 10
-#define SQLITE_DBSTATUS_CACHE_USED_SHARED 11
-#define SQLITE_DBSTATUS_MAX 11 /* Largest defined DBSTATUS */
-
-
-/*
-** CAPI3REF: Prepared Statement Status
-** METHOD: sqlite3_stmt
-**
-** ^(Each prepared statement maintains various
-** [SQLITE_STMTSTATUS counters] that measure the number
-** of times it has performed specific operations.)^ These counters can
-** be used to monitor the performance characteristics of the prepared
-** statements. For example, if the number of table steps greatly exceeds
-** the number of table searches or result rows, that would tend to indicate
-** that the prepared statement is using a full table scan rather than
-** an index.
-**
-** ^(This interface is used to retrieve and reset counter values from
-** a [prepared statement]. The first argument is the prepared statement
-** object to be interrogated. The second argument
-** is an integer code for a specific [SQLITE_STMTSTATUS counter]
-** to be interrogated.)^
-** ^The current value of the requested counter is returned.
-** ^If the resetFlg is true, then the counter is reset to zero after this
-** interface call returns.
-**
-** See also: [sqlite3_status()] and [sqlite3_db_status()].
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_stmt_status(sqlite3_stmt*, int op,int resetFlg);
-
-/*
-** CAPI3REF: Status Parameters for prepared statements
-** KEYWORDS: {SQLITE_STMTSTATUS counter} {SQLITE_STMTSTATUS counters}
-**
-** These preprocessor macros define integer codes that name counter
-** values associated with the [sqlite3_stmt_status()] interface.
-** The meanings of the various counters are as follows:
-**
-**
-** [[SQLITE_STMTSTATUS_FULLSCAN_STEP]] SQLITE_STMTSTATUS_FULLSCAN_STEP
-** ^This is the number of times that SQLite has stepped forward in
-** a table as part of a full table scan. Large numbers for this counter
-** may indicate opportunities for performance improvement through
-** careful use of indices.
-**
-** [[SQLITE_STMTSTATUS_SORT]] SQLITE_STMTSTATUS_SORT
-** ^This is the number of sort operations that have occurred.
-** A non-zero value in this counter may indicate an opportunity to
-** improvement performance through careful use of indices.
-**
-** [[SQLITE_STMTSTATUS_AUTOINDEX]] SQLITE_STMTSTATUS_AUTOINDEX
-** ^This is the number of rows inserted into transient indices that
-** were created automatically in order to help joins run faster.
-** A non-zero value in this counter may indicate an opportunity to
-** improvement performance by adding permanent indices that do not
-** need to be reinitialized each time the statement is run.
-**
-** [[SQLITE_STMTSTATUS_VM_STEP]] SQLITE_STMTSTATUS_VM_STEP
-** ^This is the number of virtual machine operations executed
-** by the prepared statement if that number is less than or equal
-** to 2147483647. The number of virtual machine operations can be
-** used as a proxy for the total work done by the prepared statement.
-** If the number of virtual machine operations exceeds 2147483647
-** then the value returned by this statement status code is undefined.
-**
-**
-*/
-#define SQLITE_STMTSTATUS_FULLSCAN_STEP 1
-#define SQLITE_STMTSTATUS_SORT 2
-#define SQLITE_STMTSTATUS_AUTOINDEX 3
-#define SQLITE_STMTSTATUS_VM_STEP 4
-
-/*
-** CAPI3REF: Custom Page Cache Object
-**
-** The sqlite3_pcache type is opaque. It is implemented by
-** the pluggable module. The SQLite core has no knowledge of
-** its size or internal structure and never deals with the
-** sqlite3_pcache object except by holding and passing pointers
-** to the object.
-**
-** See [sqlite3_pcache_methods2] for additional information.
-*/
-typedef struct sqlite3_pcache sqlite3_pcache;
-
-/*
-** CAPI3REF: Custom Page Cache Object
-**
-** The sqlite3_pcache_page object represents a single page in the
-** page cache. The page cache will allocate instances of this
-** object. Various methods of the page cache use pointers to instances
-** of this object as parameters or as their return value.
-**
-** See [sqlite3_pcache_methods2] for additional information.
-*/
-typedef struct sqlite3_pcache_page sqlite3_pcache_page;
-struct sqlite3_pcache_page {
- void *pBuf; /* The content of the page */
- void *pExtra; /* Extra information associated with the page */
-};
-
-/*
-** CAPI3REF: Application Defined Page Cache.
-** KEYWORDS: {page cache}
-**
-** ^(The [sqlite3_config]([SQLITE_CONFIG_PCACHE2], ...) interface can
-** register an alternative page cache implementation by passing in an
-** instance of the sqlite3_pcache_methods2 structure.)^
-** In many applications, most of the heap memory allocated by
-** SQLite is used for the page cache.
-** By implementing a
-** custom page cache using this API, an application can better control
-** the amount of memory consumed by SQLite, the way in which
-** that memory is allocated and released, and the policies used to
-** determine exactly which parts of a database file are cached and for
-** how long.
-**
-** The alternative page cache mechanism is an
-** extreme measure that is only needed by the most demanding applications.
-** The built-in page cache is recommended for most uses.
-**
-** ^(The contents of the sqlite3_pcache_methods2 structure are copied to an
-** internal buffer by SQLite within the call to [sqlite3_config]. Hence
-** the application may discard the parameter after the call to
-** [sqlite3_config()] returns.)^
-**
-** [[the xInit() page cache method]]
-** ^(The xInit() method is called once for each effective
-** call to [sqlite3_initialize()])^
-** (usually only once during the lifetime of the process). ^(The xInit()
-** method is passed a copy of the sqlite3_pcache_methods2.pArg value.)^
-** The intent of the xInit() method is to set up global data structures
-** required by the custom page cache implementation.
-** ^(If the xInit() method is NULL, then the
-** built-in default page cache is used instead of the application defined
-** page cache.)^
-**
-** [[the xShutdown() page cache method]]
-** ^The xShutdown() method is called by [sqlite3_shutdown()].
-** It can be used to clean up
-** any outstanding resources before process shutdown, if required.
-** ^The xShutdown() method may be NULL.
-**
-** ^SQLite automatically serializes calls to the xInit method,
-** so the xInit method need not be threadsafe. ^The
-** xShutdown method is only called from [sqlite3_shutdown()] so it does
-** not need to be threadsafe either. All other methods must be threadsafe
-** in multithreaded applications.
-**
-** ^SQLite will never invoke xInit() more than once without an intervening
-** call to xShutdown().
-**
-** [[the xCreate() page cache methods]]
-** ^SQLite invokes the xCreate() method to construct a new cache instance.
-** SQLite will typically create one cache instance for each open database file,
-** though this is not guaranteed. ^The
-** first parameter, szPage, is the size in bytes of the pages that must
-** be allocated by the cache. ^szPage will always a power of two. ^The
-** second parameter szExtra is a number of bytes of extra storage
-** associated with each page cache entry. ^The szExtra parameter will
-** a number less than 250. SQLite will use the
-** extra szExtra bytes on each page to store metadata about the underlying
-** database page on disk. The value passed into szExtra depends
-** on the SQLite version, the target platform, and how SQLite was compiled.
-** ^The third argument to xCreate(), bPurgeable, is true if the cache being
-** created will be used to cache database pages of a file stored on disk, or
-** false if it is used for an in-memory database. The cache implementation
-** does not have to do anything special based with the value of bPurgeable;
-** it is purely advisory. ^On a cache where bPurgeable is false, SQLite will
-** never invoke xUnpin() except to deliberately delete a page.
-** ^In other words, calls to xUnpin() on a cache with bPurgeable set to
-** false will always have the "discard" flag set to true.
-** ^Hence, a cache created with bPurgeable false will
-** never contain any unpinned pages.
-**
-** [[the xCachesize() page cache method]]
-** ^(The xCachesize() method may be called at any time by SQLite to set the
-** suggested maximum cache-size (number of pages stored by) the cache
-** instance passed as the first argument. This is the value configured using
-** the SQLite "[PRAGMA cache_size]" command.)^ As with the bPurgeable
-** parameter, the implementation is not required to do anything with this
-** value; it is advisory only.
-**
-** [[the xPagecount() page cache methods]]
-** The xPagecount() method must return the number of pages currently
-** stored in the cache, both pinned and unpinned.
-**
-** [[the xFetch() page cache methods]]
-** The xFetch() method locates a page in the cache and returns a pointer to
-** an sqlite3_pcache_page object associated with that page, or a NULL pointer.
-** The pBuf element of the returned sqlite3_pcache_page object will be a
-** pointer to a buffer of szPage bytes used to store the content of a
-** single database page. The pExtra element of sqlite3_pcache_page will be
-** a pointer to the szExtra bytes of extra storage that SQLite has requested
-** for each entry in the page cache.
-**
-** The page to be fetched is determined by the key. ^The minimum key value
-** is 1. After it has been retrieved using xFetch, the page is considered
-** to be "pinned".
-**
-** If the requested page is already in the page cache, then the page cache
-** implementation must return a pointer to the page buffer with its content
-** intact. If the requested page is not already in the cache, then the
-** cache implementation should use the value of the createFlag
-** parameter to help it determined what action to take:
-**
-**
-** createFlag Behavior when page is not already in cache
-** 0 Do not allocate a new page. Return NULL.
-** 1 Allocate a new page if it easy and convenient to do so.
-** Otherwise return NULL.
-** 2 Make every effort to allocate a new page. Only return
-** NULL if allocating a new page is effectively impossible.
-**
-**
-** ^(SQLite will normally invoke xFetch() with a createFlag of 0 or 1. SQLite
-** will only use a createFlag of 2 after a prior call with a createFlag of 1
-** failed.)^ In between the to xFetch() calls, SQLite may
-** attempt to unpin one or more cache pages by spilling the content of
-** pinned pages to disk and synching the operating system disk cache.
-**
-** [[the xUnpin() page cache method]]
-** ^xUnpin() is called by SQLite with a pointer to a currently pinned page
-** as its second argument. If the third parameter, discard, is non-zero,
-** then the page must be evicted from the cache.
-** ^If the discard parameter is
-** zero, then the page may be discarded or retained at the discretion of
-** page cache implementation. ^The page cache implementation
-** may choose to evict unpinned pages at any time.
-**
-** The cache must not perform any reference counting. A single
-** call to xUnpin() unpins the page regardless of the number of prior calls
-** to xFetch().
-**
-** [[the xRekey() page cache methods]]
-** The xRekey() method is used to change the key value associated with the
-** page passed as the second argument. If the cache
-** previously contains an entry associated with newKey, it must be
-** discarded. ^Any prior cache entry associated with newKey is guaranteed not
-** to be pinned.
-**
-** When SQLite calls the xTruncate() method, the cache must discard all
-** existing cache entries with page numbers (keys) greater than or equal
-** to the value of the iLimit parameter passed to xTruncate(). If any
-** of these pages are pinned, they are implicitly unpinned, meaning that
-** they can be safely discarded.
-**
-** [[the xDestroy() page cache method]]
-** ^The xDestroy() method is used to delete a cache allocated by xCreate().
-** All resources associated with the specified cache should be freed. ^After
-** calling the xDestroy() method, SQLite considers the [sqlite3_pcache*]
-** handle invalid, and will not use it with any other sqlite3_pcache_methods2
-** functions.
-**
-** [[the xShrink() page cache method]]
-** ^SQLite invokes the xShrink() method when it wants the page cache to
-** free up as much of heap memory as possible. The page cache implementation
-** is not obligated to free any memory, but well-behaved implementations should
-** do their best.
-*/
-typedef struct sqlite3_pcache_methods2 sqlite3_pcache_methods2;
-struct sqlite3_pcache_methods2 {
- int iVersion;
- void *pArg;
- int (*xInit)(void*);
- void (*xShutdown)(void*);
- sqlite3_pcache *(*xCreate)(int szPage, int szExtra, int bPurgeable);
- void (*xCachesize)(sqlite3_pcache*, int nCachesize);
- int (*xPagecount)(sqlite3_pcache*);
- sqlite3_pcache_page *(*xFetch)(sqlite3_pcache*, unsigned key, int createFlag);
- void (*xUnpin)(sqlite3_pcache*, sqlite3_pcache_page*, int discard);
- void (*xRekey)(sqlite3_pcache*, sqlite3_pcache_page*,
- unsigned oldKey, unsigned newKey);
- void (*xTruncate)(sqlite3_pcache*, unsigned iLimit);
- void (*xDestroy)(sqlite3_pcache*);
- void (*xShrink)(sqlite3_pcache*);
-};
-
-/*
-** This is the obsolete pcache_methods object that has now been replaced
-** by sqlite3_pcache_methods2. This object is not used by SQLite. It is
-** retained in the header file for backwards compatibility only.
-*/
-typedef struct sqlite3_pcache_methods sqlite3_pcache_methods;
-struct sqlite3_pcache_methods {
- void *pArg;
- int (*xInit)(void*);
- void (*xShutdown)(void*);
- sqlite3_pcache *(*xCreate)(int szPage, int bPurgeable);
- void (*xCachesize)(sqlite3_pcache*, int nCachesize);
- int (*xPagecount)(sqlite3_pcache*);
- void *(*xFetch)(sqlite3_pcache*, unsigned key, int createFlag);
- void (*xUnpin)(sqlite3_pcache*, void*, int discard);
- void (*xRekey)(sqlite3_pcache*, void*, unsigned oldKey, unsigned newKey);
- void (*xTruncate)(sqlite3_pcache*, unsigned iLimit);
- void (*xDestroy)(sqlite3_pcache*);
-};
-
-
-/*
-** CAPI3REF: Online Backup Object
-**
-** The sqlite3_backup object records state information about an ongoing
-** online backup operation. ^The sqlite3_backup object is created by
-** a call to [sqlite3_backup_init()] and is destroyed by a call to
-** [sqlite3_backup_finish()].
-**
-** See Also: [Using the SQLite Online Backup API]
-*/
-typedef struct sqlite3_backup sqlite3_backup;
-
-/*
-** CAPI3REF: Online Backup API.
-**
-** The backup API copies the content of one database into another.
-** It is useful either for creating backups of databases or
-** for copying in-memory databases to or from persistent files.
-**
-** See Also: [Using the SQLite Online Backup API]
-**
-** ^SQLite holds a write transaction open on the destination database file
-** for the duration of the backup operation.
-** ^The source database is read-locked only while it is being read;
-** it is not locked continuously for the entire backup operation.
-** ^Thus, the backup may be performed on a live source database without
-** preventing other database connections from
-** reading or writing to the source database while the backup is underway.
-**
-** ^(To perform a backup operation:
-**
-** sqlite3_backup_init() is called once to initialize the
-** backup,
-** sqlite3_backup_step() is called one or more times to transfer
-** the data between the two databases, and finally
-** sqlite3_backup_finish() is called to release all resources
-** associated with the backup operation.
-** )^
-** There should be exactly one call to sqlite3_backup_finish() for each
-** successful call to sqlite3_backup_init().
-**
-** [[sqlite3_backup_init()]] sqlite3_backup_init()
-**
-** ^The D and N arguments to sqlite3_backup_init(D,N,S,M) are the
-** [database connection] associated with the destination database
-** and the database name, respectively.
-** ^The database name is "main" for the main database, "temp" for the
-** temporary database, or the name specified after the AS keyword in
-** an [ATTACH] statement for an attached database.
-** ^The S and M arguments passed to
-** sqlite3_backup_init(D,N,S,M) identify the [database connection]
-** and database name of the source database, respectively.
-** ^The source and destination [database connections] (parameters S and D)
-** must be different or else sqlite3_backup_init(D,N,S,M) will fail with
-** an error.
-**
-** ^A call to sqlite3_backup_init() will fail, returning NULL, if
-** there is already a read or read-write transaction open on the
-** destination database.
-**
-** ^If an error occurs within sqlite3_backup_init(D,N,S,M), then NULL is
-** returned and an error code and error message are stored in the
-** destination [database connection] D.
-** ^The error code and message for the failed call to sqlite3_backup_init()
-** can be retrieved using the [sqlite3_errcode()], [sqlite3_errmsg()], and/or
-** [sqlite3_errmsg16()] functions.
-** ^A successful call to sqlite3_backup_init() returns a pointer to an
-** [sqlite3_backup] object.
-** ^The [sqlite3_backup] object may be used with the sqlite3_backup_step() and
-** sqlite3_backup_finish() functions to perform the specified backup
-** operation.
-**
-** [[sqlite3_backup_step()]] sqlite3_backup_step()
-**
-** ^Function sqlite3_backup_step(B,N) will copy up to N pages between
-** the source and destination databases specified by [sqlite3_backup] object B.
-** ^If N is negative, all remaining source pages are copied.
-** ^If sqlite3_backup_step(B,N) successfully copies N pages and there
-** are still more pages to be copied, then the function returns [SQLITE_OK].
-** ^If sqlite3_backup_step(B,N) successfully finishes copying all pages
-** from source to destination, then it returns [SQLITE_DONE].
-** ^If an error occurs while running sqlite3_backup_step(B,N),
-** then an [error code] is returned. ^As well as [SQLITE_OK] and
-** [SQLITE_DONE], a call to sqlite3_backup_step() may return [SQLITE_READONLY],
-** [SQLITE_NOMEM], [SQLITE_BUSY], [SQLITE_LOCKED], or an
-** [SQLITE_IOERR_ACCESS | SQLITE_IOERR_XXX] extended error code.
-**
-** ^(The sqlite3_backup_step() might return [SQLITE_READONLY] if
-**
-** the destination database was opened read-only, or
-** the destination database is using write-ahead-log journaling
-** and the destination and source page sizes differ, or
-** the destination database is an in-memory database and the
-** destination and source page sizes differ.
-** )^
-**
-** ^If sqlite3_backup_step() cannot obtain a required file-system lock, then
-** the [sqlite3_busy_handler | busy-handler function]
-** is invoked (if one is specified). ^If the
-** busy-handler returns non-zero before the lock is available, then
-** [SQLITE_BUSY] is returned to the caller. ^In this case the call to
-** sqlite3_backup_step() can be retried later. ^If the source
-** [database connection]
-** is being used to write to the source database when sqlite3_backup_step()
-** is called, then [SQLITE_LOCKED] is returned immediately. ^Again, in this
-** case the call to sqlite3_backup_step() can be retried later on. ^(If
-** [SQLITE_IOERR_ACCESS | SQLITE_IOERR_XXX], [SQLITE_NOMEM], or
-** [SQLITE_READONLY] is returned, then
-** there is no point in retrying the call to sqlite3_backup_step(). These
-** errors are considered fatal.)^ The application must accept
-** that the backup operation has failed and pass the backup operation handle
-** to the sqlite3_backup_finish() to release associated resources.
-**
-** ^The first call to sqlite3_backup_step() obtains an exclusive lock
-** on the destination file. ^The exclusive lock is not released until either
-** sqlite3_backup_finish() is called or the backup operation is complete
-** and sqlite3_backup_step() returns [SQLITE_DONE]. ^Every call to
-** sqlite3_backup_step() obtains a [shared lock] on the source database that
-** lasts for the duration of the sqlite3_backup_step() call.
-** ^Because the source database is not locked between calls to
-** sqlite3_backup_step(), the source database may be modified mid-way
-** through the backup process. ^If the source database is modified by an
-** external process or via a database connection other than the one being
-** used by the backup operation, then the backup will be automatically
-** restarted by the next call to sqlite3_backup_step(). ^If the source
-** database is modified by the using the same database connection as is used
-** by the backup operation, then the backup database is automatically
-** updated at the same time.
-**
-** [[sqlite3_backup_finish()]] sqlite3_backup_finish()
-**
-** When sqlite3_backup_step() has returned [SQLITE_DONE], or when the
-** application wishes to abandon the backup operation, the application
-** should destroy the [sqlite3_backup] by passing it to sqlite3_backup_finish().
-** ^The sqlite3_backup_finish() interfaces releases all
-** resources associated with the [sqlite3_backup] object.
-** ^If sqlite3_backup_step() has not yet returned [SQLITE_DONE], then any
-** active write-transaction on the destination database is rolled back.
-** The [sqlite3_backup] object is invalid
-** and may not be used following a call to sqlite3_backup_finish().
-**
-** ^The value returned by sqlite3_backup_finish is [SQLITE_OK] if no
-** sqlite3_backup_step() errors occurred, regardless or whether or not
-** sqlite3_backup_step() completed.
-** ^If an out-of-memory condition or IO error occurred during any prior
-** sqlite3_backup_step() call on the same [sqlite3_backup] object, then
-** sqlite3_backup_finish() returns the corresponding [error code].
-**
-** ^A return of [SQLITE_BUSY] or [SQLITE_LOCKED] from sqlite3_backup_step()
-** is not a permanent error and does not affect the return value of
-** sqlite3_backup_finish().
-**
-** [[sqlite3_backup_remaining()]] [[sqlite3_backup_pagecount()]]
-** sqlite3_backup_remaining() and sqlite3_backup_pagecount()
-**
-** ^The sqlite3_backup_remaining() routine returns the number of pages still
-** to be backed up at the conclusion of the most recent sqlite3_backup_step().
-** ^The sqlite3_backup_pagecount() routine returns the total number of pages
-** in the source database at the conclusion of the most recent
-** sqlite3_backup_step().
-** ^(The values returned by these functions are only updated by
-** sqlite3_backup_step(). If the source database is modified in a way that
-** changes the size of the source database or the number of pages remaining,
-** those changes are not reflected in the output of sqlite3_backup_pagecount()
-** and sqlite3_backup_remaining() until after the next
-** sqlite3_backup_step().)^
-**
-** Concurrent Usage of Database Handles
-**
-** ^The source [database connection] may be used by the application for other
-** purposes while a backup operation is underway or being initialized.
-** ^If SQLite is compiled and configured to support threadsafe database
-** connections, then the source database connection may be used concurrently
-** from within other threads.
-**
-** However, the application must guarantee that the destination
-** [database connection] is not passed to any other API (by any thread) after
-** sqlite3_backup_init() is called and before the corresponding call to
-** sqlite3_backup_finish(). SQLite does not currently check to see
-** if the application incorrectly accesses the destination [database connection]
-** and so no error code is reported, but the operations may malfunction
-** nevertheless. Use of the destination database connection while a
-** backup is in progress might also also cause a mutex deadlock.
-**
-** If running in [shared cache mode], the application must
-** guarantee that the shared cache used by the destination database
-** is not accessed while the backup is running. In practice this means
-** that the application must guarantee that the disk file being
-** backed up to is not accessed by any connection within the process,
-** not just the specific connection that was passed to sqlite3_backup_init().
-**
-** The [sqlite3_backup] object itself is partially threadsafe. Multiple
-** threads may safely make multiple concurrent calls to sqlite3_backup_step().
-** However, the sqlite3_backup_remaining() and sqlite3_backup_pagecount()
-** APIs are not strictly speaking threadsafe. If they are invoked at the
-** same time as another thread is invoking sqlite3_backup_step() it is
-** possible that they return invalid values.
-*/
-SQLITE_API sqlite3_backup *SQLITE_STDCALL sqlite3_backup_init(
- sqlite3 *pDest, /* Destination database handle */
- const char *zDestName, /* Destination database name */
- sqlite3 *pSource, /* Source database handle */
- const char *zSourceName /* Source database name */
-);
-SQLITE_API int SQLITE_STDCALL sqlite3_backup_step(sqlite3_backup *p, int nPage);
-SQLITE_API int SQLITE_STDCALL sqlite3_backup_finish(sqlite3_backup *p);
-SQLITE_API int SQLITE_STDCALL sqlite3_backup_remaining(sqlite3_backup *p);
-SQLITE_API int SQLITE_STDCALL sqlite3_backup_pagecount(sqlite3_backup *p);
-
-/*
-** CAPI3REF: Unlock Notification
-** METHOD: sqlite3
-**
-** ^When running in shared-cache mode, a database operation may fail with
-** an [SQLITE_LOCKED] error if the required locks on the shared-cache or
-** individual tables within the shared-cache cannot be obtained. See
-** [SQLite Shared-Cache Mode] for a description of shared-cache locking.
-** ^This API may be used to register a callback that SQLite will invoke
-** when the connection currently holding the required lock relinquishes it.
-** ^This API is only available if the library was compiled with the
-** [SQLITE_ENABLE_UNLOCK_NOTIFY] C-preprocessor symbol defined.
-**
-** See Also: [Using the SQLite Unlock Notification Feature].
-**
-** ^Shared-cache locks are released when a database connection concludes
-** its current transaction, either by committing it or rolling it back.
-**
-** ^When a connection (known as the blocked connection) fails to obtain a
-** shared-cache lock and SQLITE_LOCKED is returned to the caller, the
-** identity of the database connection (the blocking connection) that
-** has locked the required resource is stored internally. ^After an
-** application receives an SQLITE_LOCKED error, it may call the
-** sqlite3_unlock_notify() method with the blocked connection handle as
-** the first argument to register for a callback that will be invoked
-** when the blocking connections current transaction is concluded. ^The
-** callback is invoked from within the [sqlite3_step] or [sqlite3_close]
-** call that concludes the blocking connections transaction.
-**
-** ^(If sqlite3_unlock_notify() is called in a multi-threaded application,
-** there is a chance that the blocking connection will have already
-** concluded its transaction by the time sqlite3_unlock_notify() is invoked.
-** If this happens, then the specified callback is invoked immediately,
-** from within the call to sqlite3_unlock_notify().)^
-**
-** ^If the blocked connection is attempting to obtain a write-lock on a
-** shared-cache table, and more than one other connection currently holds
-** a read-lock on the same table, then SQLite arbitrarily selects one of
-** the other connections to use as the blocking connection.
-**
-** ^(There may be at most one unlock-notify callback registered by a
-** blocked connection. If sqlite3_unlock_notify() is called when the
-** blocked connection already has a registered unlock-notify callback,
-** then the new callback replaces the old.)^ ^If sqlite3_unlock_notify() is
-** called with a NULL pointer as its second argument, then any existing
-** unlock-notify callback is canceled. ^The blocked connections
-** unlock-notify callback may also be canceled by closing the blocked
-** connection using [sqlite3_close()].
-**
-** The unlock-notify callback is not reentrant. If an application invokes
-** any sqlite3_xxx API functions from within an unlock-notify callback, a
-** crash or deadlock may be the result.
-**
-** ^Unless deadlock is detected (see below), sqlite3_unlock_notify() always
-** returns SQLITE_OK.
-**
-** Callback Invocation Details
-**
-** When an unlock-notify callback is registered, the application provides a
-** single void* pointer that is passed to the callback when it is invoked.
-** However, the signature of the callback function allows SQLite to pass
-** it an array of void* context pointers. The first argument passed to
-** an unlock-notify callback is a pointer to an array of void* pointers,
-** and the second is the number of entries in the array.
-**
-** When a blocking connections transaction is concluded, there may be
-** more than one blocked connection that has registered for an unlock-notify
-** callback. ^If two or more such blocked connections have specified the
-** same callback function, then instead of invoking the callback function
-** multiple times, it is invoked once with the set of void* context pointers
-** specified by the blocked connections bundled together into an array.
-** This gives the application an opportunity to prioritize any actions
-** related to the set of unblocked database connections.
-**
-** Deadlock Detection
-**
-** Assuming that after registering for an unlock-notify callback a
-** database waits for the callback to be issued before taking any further
-** action (a reasonable assumption), then using this API may cause the
-** application to deadlock. For example, if connection X is waiting for
-** connection Y's transaction to be concluded, and similarly connection
-** Y is waiting on connection X's transaction, then neither connection
-** will proceed and the system may remain deadlocked indefinitely.
-**
-** To avoid this scenario, the sqlite3_unlock_notify() performs deadlock
-** detection. ^If a given call to sqlite3_unlock_notify() would put the
-** system in a deadlocked state, then SQLITE_LOCKED is returned and no
-** unlock-notify callback is registered. The system is said to be in
-** a deadlocked state if connection A has registered for an unlock-notify
-** callback on the conclusion of connection B's transaction, and connection
-** B has itself registered for an unlock-notify callback when connection
-** A's transaction is concluded. ^Indirect deadlock is also detected, so
-** the system is also considered to be deadlocked if connection B has
-** registered for an unlock-notify callback on the conclusion of connection
-** C's transaction, where connection C is waiting on connection A. ^Any
-** number of levels of indirection are allowed.
-**
-** The "DROP TABLE" Exception
-**
-** When a call to [sqlite3_step()] returns SQLITE_LOCKED, it is almost
-** always appropriate to call sqlite3_unlock_notify(). There is however,
-** one exception. When executing a "DROP TABLE" or "DROP INDEX" statement,
-** SQLite checks if there are any currently executing SELECT statements
-** that belong to the same connection. If there are, SQLITE_LOCKED is
-** returned. In this case there is no "blocking connection", so invoking
-** sqlite3_unlock_notify() results in the unlock-notify callback being
-** invoked immediately. If the application then re-attempts the "DROP TABLE"
-** or "DROP INDEX" query, an infinite loop might be the result.
-**
-** One way around this problem is to check the extended error code returned
-** by an sqlite3_step() call. ^(If there is a blocking connection, then the
-** extended error code is set to SQLITE_LOCKED_SHAREDCACHE. Otherwise, in
-** the special "DROP TABLE/INDEX" case, the extended error code is just
-** SQLITE_LOCKED.)^
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_unlock_notify(
- sqlite3 *pBlocked, /* Waiting connection */
- void (*xNotify)(void **apArg, int nArg), /* Callback function to invoke */
- void *pNotifyArg /* Argument to pass to xNotify */
-);
-
-
-/*
-** CAPI3REF: String Comparison
-**
-** ^The [sqlite3_stricmp()] and [sqlite3_strnicmp()] APIs allow applications
-** and extensions to compare the contents of two buffers containing UTF-8
-** strings in a case-independent fashion, using the same definition of "case
-** independence" that SQLite uses internally when comparing identifiers.
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_stricmp(const char *, const char *);
-SQLITE_API int SQLITE_STDCALL sqlite3_strnicmp(const char *, const char *, int);
-
-/*
-** CAPI3REF: String Globbing
-*
-** ^The [sqlite3_strglob(P,X)] interface returns zero if and only if
-** string X matches the [GLOB] pattern P.
-** ^The definition of [GLOB] pattern matching used in
-** [sqlite3_strglob(P,X)] is the same as for the "X GLOB P" operator in the
-** SQL dialect understood by SQLite. ^The [sqlite3_strglob(P,X)] function
-** is case sensitive.
-**
-** Note that this routine returns zero on a match and non-zero if the strings
-** do not match, the same as [sqlite3_stricmp()] and [sqlite3_strnicmp()].
-**
-** See also: [sqlite3_strlike()].
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_strglob(const char *zGlob, const char *zStr);
-
-/*
-** CAPI3REF: String LIKE Matching
-*
-** ^The [sqlite3_strlike(P,X,E)] interface returns zero if and only if
-** string X matches the [LIKE] pattern P with escape character E.
-** ^The definition of [LIKE] pattern matching used in
-** [sqlite3_strlike(P,X,E)] is the same as for the "X LIKE P ESCAPE E"
-** operator in the SQL dialect understood by SQLite. ^For "X LIKE P" without
-** the ESCAPE clause, set the E parameter of [sqlite3_strlike(P,X,E)] to 0.
-** ^As with the LIKE operator, the [sqlite3_strlike(P,X,E)] function is case
-** insensitive - equivalent upper and lower case ASCII characters match
-** one another.
-**
-** ^The [sqlite3_strlike(P,X,E)] function matches Unicode characters, though
-** only ASCII characters are case folded.
-**
-** Note that this routine returns zero on a match and non-zero if the strings
-** do not match, the same as [sqlite3_stricmp()] and [sqlite3_strnicmp()].
-**
-** See also: [sqlite3_strglob()].
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_strlike(const char *zGlob, const char *zStr, unsigned int cEsc);
-
-/*
-** CAPI3REF: Error Logging Interface
-**
-** ^The [sqlite3_log()] interface writes a message into the [error log]
-** established by the [SQLITE_CONFIG_LOG] option to [sqlite3_config()].
-** ^If logging is enabled, the zFormat string and subsequent arguments are
-** used with [sqlite3_snprintf()] to generate the final output string.
-**
-** The sqlite3_log() interface is intended for use by extensions such as
-** virtual tables, collating functions, and SQL functions. While there is
-** nothing to prevent an application from calling sqlite3_log(), doing so
-** is considered bad form.
-**
-** The zFormat string must not be NULL.
-**
-** To avoid deadlocks and other threading problems, the sqlite3_log() routine
-** will not use dynamically allocated memory. The log message is stored in
-** a fixed-length buffer on the stack. If the log message is longer than
-** a few hundred characters, it will be truncated to the length of the
-** buffer.
-*/
-SQLITE_API void SQLITE_CDECL sqlite3_log(int iErrCode, const char *zFormat, ...);
-
-/*
-** CAPI3REF: Write-Ahead Log Commit Hook
-** METHOD: sqlite3
-**
-** ^The [sqlite3_wal_hook()] function is used to register a callback that
-** is invoked each time data is committed to a database in wal mode.
-**
-** ^(The callback is invoked by SQLite after the commit has taken place and
-** the associated write-lock on the database released)^, so the implementation
-** may read, write or [checkpoint] the database as required.
-**
-** ^The first parameter passed to the callback function when it is invoked
-** is a copy of the third parameter passed to sqlite3_wal_hook() when
-** registering the callback. ^The second is a copy of the database handle.
-** ^The third parameter is the name of the database that was written to -
-** either "main" or the name of an [ATTACH]-ed database. ^The fourth parameter
-** is the number of pages currently in the write-ahead log file,
-** including those that were just committed.
-**
-** The callback function should normally return [SQLITE_OK]. ^If an error
-** code is returned, that error will propagate back up through the
-** SQLite code base to cause the statement that provoked the callback
-** to report an error, though the commit will have still occurred. If the
-** callback returns [SQLITE_ROW] or [SQLITE_DONE], or if it returns a value
-** that does not correspond to any valid SQLite error code, the results
-** are undefined.
-**
-** A single database handle may have at most a single write-ahead log callback
-** registered at one time. ^Calling [sqlite3_wal_hook()] replaces any
-** previously registered write-ahead log callback. ^Note that the
-** [sqlite3_wal_autocheckpoint()] interface and the
-** [wal_autocheckpoint pragma] both invoke [sqlite3_wal_hook()] and will
-** overwrite any prior [sqlite3_wal_hook()] settings.
-*/
-SQLITE_API void *SQLITE_STDCALL sqlite3_wal_hook(
- sqlite3*,
- int(*)(void *,sqlite3*,const char*,int),
- void*
-);
-
-/*
-** CAPI3REF: Configure an auto-checkpoint
-** METHOD: sqlite3
-**
-** ^The [sqlite3_wal_autocheckpoint(D,N)] is a wrapper around
-** [sqlite3_wal_hook()] that causes any database on [database connection] D
-** to automatically [checkpoint]
-** after committing a transaction if there are N or
-** more frames in the [write-ahead log] file. ^Passing zero or
-** a negative value as the nFrame parameter disables automatic
-** checkpoints entirely.
-**
-** ^The callback registered by this function replaces any existing callback
-** registered using [sqlite3_wal_hook()]. ^Likewise, registering a callback
-** using [sqlite3_wal_hook()] disables the automatic checkpoint mechanism
-** configured by this function.
-**
-** ^The [wal_autocheckpoint pragma] can be used to invoke this interface
-** from SQL.
-**
-** ^Checkpoints initiated by this mechanism are
-** [sqlite3_wal_checkpoint_v2|PASSIVE].
-**
-** ^Every new [database connection] defaults to having the auto-checkpoint
-** enabled with a threshold of 1000 or [SQLITE_DEFAULT_WAL_AUTOCHECKPOINT]
-** pages. The use of this interface
-** is only necessary if the default setting is found to be suboptimal
-** for a particular application.
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_wal_autocheckpoint(sqlite3 *db, int N);
-
-/*
-** CAPI3REF: Checkpoint a database
-** METHOD: sqlite3
-**
-** ^(The sqlite3_wal_checkpoint(D,X) is equivalent to
-** [sqlite3_wal_checkpoint_v2](D,X,[SQLITE_CHECKPOINT_PASSIVE],0,0).)^
-**
-** In brief, sqlite3_wal_checkpoint(D,X) causes the content in the
-** [write-ahead log] for database X on [database connection] D to be
-** transferred into the database file and for the write-ahead log to
-** be reset. See the [checkpointing] documentation for addition
-** information.
-**
-** This interface used to be the only way to cause a checkpoint to
-** occur. But then the newer and more powerful [sqlite3_wal_checkpoint_v2()]
-** interface was added. This interface is retained for backwards
-** compatibility and as a convenience for applications that need to manually
-** start a callback but which do not need the full power (and corresponding
-** complication) of [sqlite3_wal_checkpoint_v2()].
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_wal_checkpoint(sqlite3 *db, const char *zDb);
-
-/*
-** CAPI3REF: Checkpoint a database
-** METHOD: sqlite3
-**
-** ^(The sqlite3_wal_checkpoint_v2(D,X,M,L,C) interface runs a checkpoint
-** operation on database X of [database connection] D in mode M. Status
-** information is written back into integers pointed to by L and C.)^
-** ^(The M parameter must be a valid [checkpoint mode]:)^
-**
-**
-** SQLITE_CHECKPOINT_PASSIVE
-** ^Checkpoint as many frames as possible without waiting for any database
-** readers or writers to finish, then sync the database file if all frames
-** in the log were checkpointed. ^The [busy-handler callback]
-** is never invoked in the SQLITE_CHECKPOINT_PASSIVE mode.
-** ^On the other hand, passive mode might leave the checkpoint unfinished
-** if there are concurrent readers or writers.
-**
-** SQLITE_CHECKPOINT_FULL
-** ^This mode blocks (it invokes the
-** [sqlite3_busy_handler|busy-handler callback]) until there is no
-** database writer and all readers are reading from the most recent database
-** snapshot. ^It then checkpoints all frames in the log file and syncs the
-** database file. ^This mode blocks new database writers while it is pending,
-** but new database readers are allowed to continue unimpeded.
-**
-** SQLITE_CHECKPOINT_RESTART
-** ^This mode works the same way as SQLITE_CHECKPOINT_FULL with the addition
-** that after checkpointing the log file it blocks (calls the
-** [busy-handler callback])
-** until all readers are reading from the database file only. ^This ensures
-** that the next writer will restart the log file from the beginning.
-** ^Like SQLITE_CHECKPOINT_FULL, this mode blocks new
-** database writer attempts while it is pending, but does not impede readers.
-**
-** SQLITE_CHECKPOINT_TRUNCATE
-** ^This mode works the same way as SQLITE_CHECKPOINT_RESTART with the
-** addition that it also truncates the log file to zero bytes just prior
-** to a successful return.
-**
-**
-** ^If pnLog is not NULL, then *pnLog is set to the total number of frames in
-** the log file or to -1 if the checkpoint could not run because
-** of an error or because the database is not in [WAL mode]. ^If pnCkpt is not
-** NULL,then *pnCkpt is set to the total number of checkpointed frames in the
-** log file (including any that were already checkpointed before the function
-** was called) or to -1 if the checkpoint could not run due to an error or
-** because the database is not in WAL mode. ^Note that upon successful
-** completion of an SQLITE_CHECKPOINT_TRUNCATE, the log file will have been
-** truncated to zero bytes and so both *pnLog and *pnCkpt will be set to zero.
-**
-** ^All calls obtain an exclusive "checkpoint" lock on the database file. ^If
-** any other process is running a checkpoint operation at the same time, the
-** lock cannot be obtained and SQLITE_BUSY is returned. ^Even if there is a
-** busy-handler configured, it will not be invoked in this case.
-**
-** ^The SQLITE_CHECKPOINT_FULL, RESTART and TRUNCATE modes also obtain the
-** exclusive "writer" lock on the database file. ^If the writer lock cannot be
-** obtained immediately, and a busy-handler is configured, it is invoked and
-** the writer lock retried until either the busy-handler returns 0 or the lock
-** is successfully obtained. ^The busy-handler is also invoked while waiting for
-** database readers as described above. ^If the busy-handler returns 0 before
-** the writer lock is obtained or while waiting for database readers, the
-** checkpoint operation proceeds from that point in the same way as
-** SQLITE_CHECKPOINT_PASSIVE - checkpointing as many frames as possible
-** without blocking any further. ^SQLITE_BUSY is returned in this case.
-**
-** ^If parameter zDb is NULL or points to a zero length string, then the
-** specified operation is attempted on all WAL databases [attached] to
-** [database connection] db. In this case the
-** values written to output parameters *pnLog and *pnCkpt are undefined. ^If
-** an SQLITE_BUSY error is encountered when processing one or more of the
-** attached WAL databases, the operation is still attempted on any remaining
-** attached databases and SQLITE_BUSY is returned at the end. ^If any other
-** error occurs while processing an attached database, processing is abandoned
-** and the error code is returned to the caller immediately. ^If no error
-** (SQLITE_BUSY or otherwise) is encountered while processing the attached
-** databases, SQLITE_OK is returned.
-**
-** ^If database zDb is the name of an attached database that is not in WAL
-** mode, SQLITE_OK is returned and both *pnLog and *pnCkpt set to -1. ^If
-** zDb is not NULL (or a zero length string) and is not the name of any
-** attached database, SQLITE_ERROR is returned to the caller.
-**
-** ^Unless it returns SQLITE_MISUSE,
-** the sqlite3_wal_checkpoint_v2() interface
-** sets the error information that is queried by
-** [sqlite3_errcode()] and [sqlite3_errmsg()].
-**
-** ^The [PRAGMA wal_checkpoint] command can be used to invoke this interface
-** from SQL.
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_wal_checkpoint_v2(
- sqlite3 *db, /* Database handle */
- const char *zDb, /* Name of attached database (or NULL) */
- int eMode, /* SQLITE_CHECKPOINT_* value */
- int *pnLog, /* OUT: Size of WAL log in frames */
- int *pnCkpt /* OUT: Total number of frames checkpointed */
-);
-
-/*
-** CAPI3REF: Checkpoint Mode Values
-** KEYWORDS: {checkpoint mode}
-**
-** These constants define all valid values for the "checkpoint mode" passed
-** as the third parameter to the [sqlite3_wal_checkpoint_v2()] interface.
-** See the [sqlite3_wal_checkpoint_v2()] documentation for details on the
-** meaning of each of these checkpoint modes.
-*/
-#define SQLITE_CHECKPOINT_PASSIVE 0 /* Do as much as possible w/o blocking */
-#define SQLITE_CHECKPOINT_FULL 1 /* Wait for writers, then checkpoint */
-#define SQLITE_CHECKPOINT_RESTART 2 /* Like FULL but wait for for readers */
-#define SQLITE_CHECKPOINT_TRUNCATE 3 /* Like RESTART but also truncate WAL */
-
-/*
-** CAPI3REF: Virtual Table Interface Configuration
-**
-** This function may be called by either the [xConnect] or [xCreate] method
-** of a [virtual table] implementation to configure
-** various facets of the virtual table interface.
-**
-** If this interface is invoked outside the context of an xConnect or
-** xCreate virtual table method then the behavior is undefined.
-**
-** At present, there is only one option that may be configured using
-** this function. (See [SQLITE_VTAB_CONSTRAINT_SUPPORT].) Further options
-** may be added in the future.
-*/
-SQLITE_API int SQLITE_CDECL sqlite3_vtab_config(sqlite3*, int op, ...);
-
-/*
-** CAPI3REF: Virtual Table Configuration Options
-**
-** These macros define the various options to the
-** [sqlite3_vtab_config()] interface that [virtual table] implementations
-** can use to customize and optimize their behavior.
-**
-**
-** SQLITE_VTAB_CONSTRAINT_SUPPORT
-** Calls of the form
-** [sqlite3_vtab_config](db,SQLITE_VTAB_CONSTRAINT_SUPPORT,X) are supported,
-** where X is an integer. If X is zero, then the [virtual table] whose
-** [xCreate] or [xConnect] method invoked [sqlite3_vtab_config()] does not
-** support constraints. In this configuration (which is the default) if
-** a call to the [xUpdate] method returns [SQLITE_CONSTRAINT], then the entire
-** statement is rolled back as if [ON CONFLICT | OR ABORT] had been
-** specified as part of the users SQL statement, regardless of the actual
-** ON CONFLICT mode specified.
-**
-** If X is non-zero, then the virtual table implementation guarantees
-** that if [xUpdate] returns [SQLITE_CONSTRAINT], it will do so before
-** any modifications to internal or persistent data structures have been made.
-** If the [ON CONFLICT] mode is ABORT, FAIL, IGNORE or ROLLBACK, SQLite
-** is able to roll back a statement or database transaction, and abandon
-** or continue processing the current SQL statement as appropriate.
-** If the ON CONFLICT mode is REPLACE and the [xUpdate] method returns
-** [SQLITE_CONSTRAINT], SQLite handles this as if the ON CONFLICT mode
-** had been ABORT.
-**
-** Virtual table implementations that are required to handle OR REPLACE
-** must do so within the [xUpdate] method. If a call to the
-** [sqlite3_vtab_on_conflict()] function indicates that the current ON
-** CONFLICT policy is REPLACE, the virtual table implementation should
-** silently replace the appropriate rows within the xUpdate callback and
-** return SQLITE_OK. Or, if this is not possible, it may return
-** SQLITE_CONSTRAINT, in which case SQLite falls back to OR ABORT
-** constraint handling.
-**
-*/
-#define SQLITE_VTAB_CONSTRAINT_SUPPORT 1
-
-/*
-** CAPI3REF: Determine The Virtual Table Conflict Policy
-**
-** This function may only be called from within a call to the [xUpdate] method
-** of a [virtual table] implementation for an INSERT or UPDATE operation. ^The
-** value returned is one of [SQLITE_ROLLBACK], [SQLITE_IGNORE], [SQLITE_FAIL],
-** [SQLITE_ABORT], or [SQLITE_REPLACE], according to the [ON CONFLICT] mode
-** of the SQL statement that triggered the call to the [xUpdate] method of the
-** [virtual table].
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_vtab_on_conflict(sqlite3 *);
-
-/*
-** CAPI3REF: Conflict resolution modes
-** KEYWORDS: {conflict resolution mode}
-**
-** These constants are returned by [sqlite3_vtab_on_conflict()] to
-** inform a [virtual table] implementation what the [ON CONFLICT] mode
-** is for the SQL statement being evaluated.
-**
-** Note that the [SQLITE_IGNORE] constant is also used as a potential
-** return value from the [sqlite3_set_authorizer()] callback and that
-** [SQLITE_ABORT] is also a [result code].
-*/
-#define SQLITE_ROLLBACK 1
-/* #define SQLITE_IGNORE 2 // Also used by sqlite3_authorizer() callback */
-#define SQLITE_FAIL 3
-/* #define SQLITE_ABORT 4 // Also an error code */
-#define SQLITE_REPLACE 5
-
-/*
-** CAPI3REF: Prepared Statement Scan Status Opcodes
-** KEYWORDS: {scanstatus options}
-**
-** The following constants can be used for the T parameter to the
-** [sqlite3_stmt_scanstatus(S,X,T,V)] interface. Each constant designates a
-** different metric for sqlite3_stmt_scanstatus() to return.
-**
-** When the value returned to V is a string, space to hold that string is
-** managed by the prepared statement S and will be automatically freed when
-** S is finalized.
-**
-**
-** [[SQLITE_SCANSTAT_NLOOP]] SQLITE_SCANSTAT_NLOOP
-** ^The [sqlite3_int64] variable pointed to by the T parameter will be
-** set to the total number of times that the X-th loop has run.
-**
-** [[SQLITE_SCANSTAT_NVISIT]] SQLITE_SCANSTAT_NVISIT
-** ^The [sqlite3_int64] variable pointed to by the T parameter will be set
-** to the total number of rows examined by all iterations of the X-th loop.
-**
-** [[SQLITE_SCANSTAT_EST]] SQLITE_SCANSTAT_EST
-** ^The "double" variable pointed to by the T parameter will be set to the
-** query planner's estimate for the average number of rows output from each
-** iteration of the X-th loop. If the query planner's estimates was accurate,
-** then this value will approximate the quotient NVISIT/NLOOP and the
-** product of this value for all prior loops with the same SELECTID will
-** be the NLOOP value for the current loop.
-**
-** [[SQLITE_SCANSTAT_NAME]] SQLITE_SCANSTAT_NAME
-** ^The "const char *" variable pointed to by the T parameter will be set
-** to a zero-terminated UTF-8 string containing the name of the index or table
-** used for the X-th loop.
-**
-** [[SQLITE_SCANSTAT_EXPLAIN]] SQLITE_SCANSTAT_EXPLAIN
-** ^The "const char *" variable pointed to by the T parameter will be set
-** to a zero-terminated UTF-8 string containing the [EXPLAIN QUERY PLAN]
-** description for the X-th loop.
-**
-** [[SQLITE_SCANSTAT_SELECTID]] SQLITE_SCANSTAT_SELECT
-** ^The "int" variable pointed to by the T parameter will be set to the
-** "select-id" for the X-th loop. The select-id identifies which query or
-** subquery the loop is part of. The main query has a select-id of zero.
-** The select-id is the same value as is output in the first column
-** of an [EXPLAIN QUERY PLAN] query.
-**
-*/
-#define SQLITE_SCANSTAT_NLOOP 0
-#define SQLITE_SCANSTAT_NVISIT 1
-#define SQLITE_SCANSTAT_EST 2
-#define SQLITE_SCANSTAT_NAME 3
-#define SQLITE_SCANSTAT_EXPLAIN 4
-#define SQLITE_SCANSTAT_SELECTID 5
-
-/*
-** CAPI3REF: Prepared Statement Scan Status
-** METHOD: sqlite3_stmt
-**
-** This interface returns information about the predicted and measured
-** performance for pStmt. Advanced applications can use this
-** interface to compare the predicted and the measured performance and
-** issue warnings and/or rerun [ANALYZE] if discrepancies are found.
-**
-** Since this interface is expected to be rarely used, it is only
-** available if SQLite is compiled using the [SQLITE_ENABLE_STMT_SCANSTATUS]
-** compile-time option.
-**
-** The "iScanStatusOp" parameter determines which status information to return.
-** The "iScanStatusOp" must be one of the [scanstatus options] or the behavior
-** of this interface is undefined.
-** ^The requested measurement is written into a variable pointed to by
-** the "pOut" parameter.
-** Parameter "idx" identifies the specific loop to retrieve statistics for.
-** Loops are numbered starting from zero. ^If idx is out of range - less than
-** zero or greater than or equal to the total number of loops used to implement
-** the statement - a non-zero value is returned and the variable that pOut
-** points to is unchanged.
-**
-** ^Statistics might not be available for all loops in all statements. ^In cases
-** where there exist loops with no available statistics, this function behaves
-** as if the loop did not exist - it returns non-zero and leave the variable
-** that pOut points to unchanged.
-**
-** See also: [sqlite3_stmt_scanstatus_reset()]
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_stmt_scanstatus(
- sqlite3_stmt *pStmt, /* Prepared statement for which info desired */
- int idx, /* Index of loop to report on */
- int iScanStatusOp, /* Information desired. SQLITE_SCANSTAT_* */
- void *pOut /* Result written here */
-);
-
-/*
-** CAPI3REF: Zero Scan-Status Counters
-** METHOD: sqlite3_stmt
-**
-** ^Zero all [sqlite3_stmt_scanstatus()] related event counters.
-**
-** This API is only available if the library is built with pre-processor
-** symbol [SQLITE_ENABLE_STMT_SCANSTATUS] defined.
-*/
-SQLITE_API void SQLITE_STDCALL sqlite3_stmt_scanstatus_reset(sqlite3_stmt*);
-
-/*
-** CAPI3REF: Flush caches to disk mid-transaction
-**
-** ^If a write-transaction is open on [database connection] D when the
-** [sqlite3_db_cacheflush(D)] interface invoked, any dirty
-** pages in the pager-cache that are not currently in use are written out
-** to disk. A dirty page may be in use if a database cursor created by an
-** active SQL statement is reading from it, or if it is page 1 of a database
-** file (page 1 is always "in use"). ^The [sqlite3_db_cacheflush(D)]
-** interface flushes caches for all schemas - "main", "temp", and
-** any [attached] databases.
-**
-** ^If this function needs to obtain extra database locks before dirty pages
-** can be flushed to disk, it does so. ^If those locks cannot be obtained
-** immediately and there is a busy-handler callback configured, it is invoked
-** in the usual manner. ^If the required lock still cannot be obtained, then
-** the database is skipped and an attempt made to flush any dirty pages
-** belonging to the next (if any) database. ^If any databases are skipped
-** because locks cannot be obtained, but no other error occurs, this
-** function returns SQLITE_BUSY.
-**
-** ^If any other error occurs while flushing dirty pages to disk (for
-** example an IO error or out-of-memory condition), then processing is
-** abandoned and an SQLite [error code] is returned to the caller immediately.
-**
-** ^Otherwise, if no error occurs, [sqlite3_db_cacheflush()] returns SQLITE_OK.
-**
-** ^This function does not set the database handle error code or message
-** returned by the [sqlite3_errcode()] and [sqlite3_errmsg()] functions.
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_db_cacheflush(sqlite3*);
-
-/*
-** CAPI3REF: The pre-update hook.
-**
-** ^These interfaces are only available if SQLite is compiled using the
-** [SQLITE_ENABLE_PREUPDATE_HOOK] compile-time option.
-**
-** ^The [sqlite3_preupdate_hook()] interface registers a callback function
-** that is invoked prior to each [INSERT], [UPDATE], and [DELETE] operation
-** on a [rowid table].
-** ^At most one preupdate hook may be registered at a time on a single
-** [database connection]; each call to [sqlite3_preupdate_hook()] overrides
-** the previous setting.
-** ^The preupdate hook is disabled by invoking [sqlite3_preupdate_hook()]
-** with a NULL pointer as the second parameter.
-** ^The third parameter to [sqlite3_preupdate_hook()] is passed through as
-** the first parameter to callbacks.
-**
-** ^The preupdate hook only fires for changes to [rowid tables]; the preupdate
-** hook is not invoked for changes to [virtual tables] or [WITHOUT ROWID]
-** tables.
-**
-** ^The second parameter to the preupdate callback is a pointer to
-** the [database connection] that registered the preupdate hook.
-** ^The third parameter to the preupdate callback is one of the constants
-** [SQLITE_INSERT], [SQLITE_DELETE], or [SQLITE_UPDATE] to identify the
-** kind of update operation that is about to occur.
-** ^(The fourth parameter to the preupdate callback is the name of the
-** database within the database connection that is being modified. This
-** will be "main" for the main database or "temp" for TEMP tables or
-** the name given after the AS keyword in the [ATTACH] statement for attached
-** databases.)^
-** ^The fifth parameter to the preupdate callback is the name of the
-** table that is being modified.
-** ^The sixth parameter to the preupdate callback is the initial [rowid] of the
-** row being changes for SQLITE_UPDATE and SQLITE_DELETE changes and is
-** undefined for SQLITE_INSERT changes.
-** ^The seventh parameter to the preupdate callback is the final [rowid] of
-** the row being changed for SQLITE_UPDATE and SQLITE_INSERT changes and is
-** undefined for SQLITE_DELETE changes.
-**
-** The [sqlite3_preupdate_old()], [sqlite3_preupdate_new()],
-** [sqlite3_preupdate_count()], and [sqlite3_preupdate_depth()] interfaces
-** provide additional information about a preupdate event. These routines
-** may only be called from within a preupdate callback. Invoking any of
-** these routines from outside of a preupdate callback or with a
-** [database connection] pointer that is different from the one supplied
-** to the preupdate callback results in undefined and probably undesirable
-** behavior.
-**
-** ^The [sqlite3_preupdate_count(D)] interface returns the number of columns
-** in the row that is being inserted, updated, or deleted.
-**
-** ^The [sqlite3_preupdate_old(D,N,P)] interface writes into P a pointer to
-** a [protected sqlite3_value] that contains the value of the Nth column of
-** the table row before it is updated. The N parameter must be between 0
-** and one less than the number of columns or the behavior will be
-** undefined. This must only be used within SQLITE_UPDATE and SQLITE_DELETE
-** preupdate callbacks; if it is used by an SQLITE_INSERT callback then the
-** behavior is undefined. The [sqlite3_value] that P points to
-** will be destroyed when the preupdate callback returns.
-**
-** ^The [sqlite3_preupdate_new(D,N,P)] interface writes into P a pointer to
-** a [protected sqlite3_value] that contains the value of the Nth column of
-** the table row after it is updated. The N parameter must be between 0
-** and one less than the number of columns or the behavior will be
-** undefined. This must only be used within SQLITE_INSERT and SQLITE_UPDATE
-** preupdate callbacks; if it is used by an SQLITE_DELETE callback then the
-** behavior is undefined. The [sqlite3_value] that P points to
-** will be destroyed when the preupdate callback returns.
-**
-** ^The [sqlite3_preupdate_depth(D)] interface returns 0 if the preupdate
-** callback was invoked as a result of a direct insert, update, or delete
-** operation; or 1 for inserts, updates, or deletes invoked by top-level
-** triggers; or 2 for changes resulting from triggers called by top-level
-** triggers; and so forth.
-**
-** See also: [sqlite3_update_hook()]
-*/
-SQLITE_API SQLITE_EXPERIMENTAL void *SQLITE_STDCALL sqlite3_preupdate_hook(
- sqlite3 *db,
- void(*xPreUpdate)(
- void *pCtx, /* Copy of third arg to preupdate_hook() */
- sqlite3 *db, /* Database handle */
- int op, /* SQLITE_UPDATE, DELETE or INSERT */
- char const *zDb, /* Database name */
- char const *zName, /* Table name */
- sqlite3_int64 iKey1, /* Rowid of row about to be deleted/updated */
- sqlite3_int64 iKey2 /* New rowid value (for a rowid UPDATE) */
- ),
- void*
-);
-SQLITE_API SQLITE_EXPERIMENTAL int SQLITE_STDCALL sqlite3_preupdate_old(sqlite3 *, int, sqlite3_value **);
-SQLITE_API SQLITE_EXPERIMENTAL int SQLITE_STDCALL sqlite3_preupdate_count(sqlite3 *);
-SQLITE_API SQLITE_EXPERIMENTAL int SQLITE_STDCALL sqlite3_preupdate_depth(sqlite3 *);
-SQLITE_API SQLITE_EXPERIMENTAL int SQLITE_STDCALL sqlite3_preupdate_new(sqlite3 *, int, sqlite3_value **);
-
-/*
-** CAPI3REF: Low-level system error code
-**
-** ^Attempt to return the underlying operating system error code or error
-** number that caused the most recent I/O error or failure to open a file.
-** The return value is OS-dependent. For example, on unix systems, after
-** [sqlite3_open_v2()] returns [SQLITE_CANTOPEN], this interface could be
-** called to get back the underlying "errno" that caused the problem, such
-** as ENOSPC, EAUTH, EISDIR, and so forth.
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_system_errno(sqlite3*);
-
-/*
-** CAPI3REF: Database Snapshot
-** KEYWORDS: {snapshot}
-** EXPERIMENTAL
-**
-** An instance of the snapshot object records the state of a [WAL mode]
-** database for some specific point in history.
-**
-** In [WAL mode], multiple [database connections] that are open on the
-** same database file can each be reading a different historical version
-** of the database file. When a [database connection] begins a read
-** transaction, that connection sees an unchanging copy of the database
-** as it existed for the point in time when the transaction first started.
-** Subsequent changes to the database from other connections are not seen
-** by the reader until a new read transaction is started.
-**
-** The sqlite3_snapshot object records state information about an historical
-** version of the database file so that it is possible to later open a new read
-** transaction that sees that historical version of the database rather than
-** the most recent version.
-**
-** The constructor for this object is [sqlite3_snapshot_get()]. The
-** [sqlite3_snapshot_open()] method causes a fresh read transaction to refer
-** to an historical snapshot (if possible). The destructor for
-** sqlite3_snapshot objects is [sqlite3_snapshot_free()].
-*/
-typedef struct sqlite3_snapshot sqlite3_snapshot;
-
-/*
-** CAPI3REF: Record A Database Snapshot
-** EXPERIMENTAL
-**
-** ^The [sqlite3_snapshot_get(D,S,P)] interface attempts to make a
-** new [sqlite3_snapshot] object that records the current state of
-** schema S in database connection D. ^On success, the
-** [sqlite3_snapshot_get(D,S,P)] interface writes a pointer to the newly
-** created [sqlite3_snapshot] object into *P and returns SQLITE_OK.
-** ^If schema S of [database connection] D is not a [WAL mode] database
-** that is in a read transaction, then [sqlite3_snapshot_get(D,S,P)]
-** leaves the *P value unchanged and returns an appropriate [error code].
-**
-** The [sqlite3_snapshot] object returned from a successful call to
-** [sqlite3_snapshot_get()] must be freed using [sqlite3_snapshot_free()]
-** to avoid a memory leak.
-**
-** The [sqlite3_snapshot_get()] interface is only available when the
-** SQLITE_ENABLE_SNAPSHOT compile-time option is used.
-*/
-SQLITE_API SQLITE_EXPERIMENTAL int SQLITE_STDCALL sqlite3_snapshot_get(
- sqlite3 *db,
- const char *zSchema,
- sqlite3_snapshot **ppSnapshot
-);
-
-/*
-** CAPI3REF: Start a read transaction on an historical snapshot
-** EXPERIMENTAL
-**
-** ^The [sqlite3_snapshot_open(D,S,P)] interface starts a
-** read transaction for schema S of
-** [database connection] D such that the read transaction
-** refers to historical [snapshot] P, rather than the most
-** recent change to the database.
-** ^The [sqlite3_snapshot_open()] interface returns SQLITE_OK on success
-** or an appropriate [error code] if it fails.
-**
-** ^In order to succeed, a call to [sqlite3_snapshot_open(D,S,P)] must be
-** the first operation following the [BEGIN] that takes the schema S
-** out of [autocommit mode].
-** ^In other words, schema S must not currently be in
-** a transaction for [sqlite3_snapshot_open(D,S,P)] to work, but the
-** database connection D must be out of [autocommit mode].
-** ^A [snapshot] will fail to open if it has been overwritten by a
-** [checkpoint].
-** ^(A call to [sqlite3_snapshot_open(D,S,P)] will fail if the
-** database connection D does not know that the database file for
-** schema S is in [WAL mode]. A database connection might not know
-** that the database file is in [WAL mode] if there has been no prior
-** I/O on that database connection, or if the database entered [WAL mode]
-** after the most recent I/O on the database connection.)^
-** (Hint: Run "[PRAGMA application_id]" against a newly opened
-** database connection in order to make it ready to use snapshots.)
-**
-** The [sqlite3_snapshot_open()] interface is only available when the
-** SQLITE_ENABLE_SNAPSHOT compile-time option is used.
-*/
-SQLITE_API SQLITE_EXPERIMENTAL int SQLITE_STDCALL sqlite3_snapshot_open(
- sqlite3 *db,
- const char *zSchema,
- sqlite3_snapshot *pSnapshot
-);
-
-/*
-** CAPI3REF: Destroy a snapshot
-** EXPERIMENTAL
-**
-** ^The [sqlite3_snapshot_free(P)] interface destroys [sqlite3_snapshot] P.
-** The application must eventually free every [sqlite3_snapshot] object
-** using this routine to avoid a memory leak.
-**
-** The [sqlite3_snapshot_free()] interface is only available when the
-** SQLITE_ENABLE_SNAPSHOT compile-time option is used.
-*/
-SQLITE_API SQLITE_EXPERIMENTAL void SQLITE_STDCALL sqlite3_snapshot_free(sqlite3_snapshot*);
-
-/*
-** CAPI3REF: Compare the ages of two snapshot handles.
-** EXPERIMENTAL
-**
-** The sqlite3_snapshot_cmp(P1, P2) interface is used to compare the ages
-** of two valid snapshot handles.
-**
-** If the two snapshot handles are not associated with the same database
-** file, the result of the comparison is undefined.
-**
-** Additionally, the result of the comparison is only valid if both of the
-** snapshot handles were obtained by calling sqlite3_snapshot_get() since the
-** last time the wal file was deleted. The wal file is deleted when the
-** database is changed back to rollback mode or when the number of database
-** clients drops to zero. If either snapshot handle was obtained before the
-** wal file was last deleted, the value returned by this function
-** is undefined.
-**
-** Otherwise, this API returns a negative value if P1 refers to an older
-** snapshot than P2, zero if the two handles refer to the same database
-** snapshot, and a positive value if P1 is a newer snapshot than P2.
-*/
-SQLITE_API SQLITE_EXPERIMENTAL int SQLITE_STDCALL sqlite3_snapshot_cmp(
- sqlite3_snapshot *p1,
- sqlite3_snapshot *p2
-);
-
-/*
-** Undo the hack that converts floating point types to integer for
-** builds on processors without floating point support.
-*/
-#ifdef SQLITE_OMIT_FLOATING_POINT
-# undef double
-#endif
-
-#if 0
-} /* End of the 'extern "C"' block */
-#endif
-#endif /* SQLITE3_H */
-
-/******** Begin file sqlite3rtree.h *********/
-/*
-** 2010 August 30
-**
-** The author disclaims copyright to this source code. In place of
-** a legal notice, here is a blessing:
-**
-** May you do good and not evil.
-** May you find forgiveness for yourself and forgive others.
-** May you share freely, never taking more than you give.
-**
-*************************************************************************
-*/
-
-#ifndef _SQLITE3RTREE_H_
-#define _SQLITE3RTREE_H_
-
-
-#if 0
-extern "C" {
-#endif
-
-typedef struct sqlite3_rtree_geometry sqlite3_rtree_geometry;
-typedef struct sqlite3_rtree_query_info sqlite3_rtree_query_info;
-
-/* The double-precision datatype used by RTree depends on the
-** SQLITE_RTREE_INT_ONLY compile-time option.
-*/
-#ifdef SQLITE_RTREE_INT_ONLY
- typedef sqlite3_int64 sqlite3_rtree_dbl;
-#else
- typedef double sqlite3_rtree_dbl;
-#endif
-
-/*
-** Register a geometry callback named zGeom that can be used as part of an
-** R-Tree geometry query as follows:
-**
-** SELECT ... FROM WHERE MATCH $zGeom(... params ...)
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_rtree_geometry_callback(
- sqlite3 *db,
- const char *zGeom,
- int (*xGeom)(sqlite3_rtree_geometry*, int, sqlite3_rtree_dbl*,int*),
- void *pContext
-);
-
-
-/*
-** A pointer to a structure of the following type is passed as the first
-** argument to callbacks registered using rtree_geometry_callback().
-*/
-struct sqlite3_rtree_geometry {
- void *pContext; /* Copy of pContext passed to s_r_g_c() */
- int nParam; /* Size of array aParam[] */
- sqlite3_rtree_dbl *aParam; /* Parameters passed to SQL geom function */
- void *pUser; /* Callback implementation user data */
- void (*xDelUser)(void *); /* Called by SQLite to clean up pUser */
-};
-
-/*
-** Register a 2nd-generation geometry callback named zScore that can be
-** used as part of an R-Tree geometry query as follows:
-**
-** SELECT ... FROM WHERE MATCH $zQueryFunc(... params ...)
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_rtree_query_callback(
- sqlite3 *db,
- const char *zQueryFunc,
- int (*xQueryFunc)(sqlite3_rtree_query_info*),
- void *pContext,
- void (*xDestructor)(void*)
-);
-
-
-/*
-** A pointer to a structure of the following type is passed as the
-** argument to scored geometry callback registered using
-** sqlite3_rtree_query_callback().
-**
-** Note that the first 5 fields of this structure are identical to
-** sqlite3_rtree_geometry. This structure is a subclass of
-** sqlite3_rtree_geometry.
-*/
-struct sqlite3_rtree_query_info {
- void *pContext; /* pContext from when function registered */
- int nParam; /* Number of function parameters */
- sqlite3_rtree_dbl *aParam; /* value of function parameters */
- void *pUser; /* callback can use this, if desired */
- void (*xDelUser)(void*); /* function to free pUser */
- sqlite3_rtree_dbl *aCoord; /* Coordinates of node or entry to check */
- unsigned int *anQueue; /* Number of pending entries in the queue */
- int nCoord; /* Number of coordinates */
- int iLevel; /* Level of current node or entry */
- int mxLevel; /* The largest iLevel value in the tree */
- sqlite3_int64 iRowid; /* Rowid for current entry */
- sqlite3_rtree_dbl rParentScore; /* Score of parent node */
- int eParentWithin; /* Visibility of parent node */
- int eWithin; /* OUT: Visiblity */
- sqlite3_rtree_dbl rScore; /* OUT: Write the score here */
- /* The following fields are only available in 3.8.11 and later */
- sqlite3_value **apSqlParam; /* Original SQL values of parameters */
-};
-
-/*
-** Allowed values for sqlite3_rtree_query.eWithin and .eParentWithin.
-*/
-#define NOT_WITHIN 0 /* Object completely outside of query region */
-#define PARTLY_WITHIN 1 /* Object partially overlaps query region */
-#define FULLY_WITHIN 2 /* Object fully contained within query region */
-
-
-#if 0
-} /* end of the 'extern "C"' block */
-#endif
-
-#endif /* ifndef _SQLITE3RTREE_H_ */
-
-/******** End of sqlite3rtree.h *********/
-/******** Begin file sqlite3session.h *********/
-
-#if !defined(__SQLITESESSION_H_) && defined(SQLITE_ENABLE_SESSION)
-#define __SQLITESESSION_H_ 1
-
-/*
-** Make sure we can call this stuff from C++.
-*/
-#if 0
-extern "C" {
-#endif
-
-
-/*
-** CAPI3REF: Session Object Handle
-*/
-typedef struct sqlite3_session sqlite3_session;
-
-/*
-** CAPI3REF: Changeset Iterator Handle
-*/
-typedef struct sqlite3_changeset_iter sqlite3_changeset_iter;
-
-/*
-** CAPI3REF: Create A New Session Object
-**
-** Create a new session object attached to database handle db. If successful,
-** a pointer to the new object is written to *ppSession and SQLITE_OK is
-** returned. If an error occurs, *ppSession is set to NULL and an SQLite
-** error code (e.g. SQLITE_NOMEM) is returned.
-**
-** It is possible to create multiple session objects attached to a single
-** database handle.
-**
-** Session objects created using this function should be deleted using the
-** [sqlite3session_delete()] function before the database handle that they
-** are attached to is itself closed. If the database handle is closed before
-** the session object is deleted, then the results of calling any session
-** module function, including [sqlite3session_delete()] on the session object
-** are undefined.
-**
-** Because the session module uses the [sqlite3_preupdate_hook()] API, it
-** is not possible for an application to register a pre-update hook on a
-** database handle that has one or more session objects attached. Nor is
-** it possible to create a session object attached to a database handle for
-** which a pre-update hook is already defined. The results of attempting
-** either of these things are undefined.
-**
-** The session object will be used to create changesets for tables in
-** database zDb, where zDb is either "main", or "temp", or the name of an
-** attached database. It is not an error if database zDb is not attached
-** to the database when the session object is created.
-*/
-int sqlite3session_create(
- sqlite3 *db, /* Database handle */
- const char *zDb, /* Name of db (e.g. "main") */
- sqlite3_session **ppSession /* OUT: New session object */
-);
-
-/*
-** CAPI3REF: Delete A Session Object
-**
-** Delete a session object previously allocated using
-** [sqlite3session_create()]. Once a session object has been deleted, the
-** results of attempting to use pSession with any other session module
-** function are undefined.
-**
-** Session objects must be deleted before the database handle to which they
-** are attached is closed. Refer to the documentation for
-** [sqlite3session_create()] for details.
-*/
-void sqlite3session_delete(sqlite3_session *pSession);
-
-
-/*
-** CAPI3REF: Enable Or Disable A Session Object
-**
-** Enable or disable the recording of changes by a session object. When
-** enabled, a session object records changes made to the database. When
-** disabled - it does not. A newly created session object is enabled.
-** Refer to the documentation for [sqlite3session_changeset()] for further
-** details regarding how enabling and disabling a session object affects
-** the eventual changesets.
-**
-** Passing zero to this function disables the session. Passing a value
-** greater than zero enables it. Passing a value less than zero is a
-** no-op, and may be used to query the current state of the session.
-**
-** The return value indicates the final state of the session object: 0 if
-** the session is disabled, or 1 if it is enabled.
-*/
-int sqlite3session_enable(sqlite3_session *pSession, int bEnable);
-
-/*
-** CAPI3REF: Set Or Clear the Indirect Change Flag
-**
-** Each change recorded by a session object is marked as either direct or
-** indirect. A change is marked as indirect if either:
-**
-**
-** The session object "indirect" flag is set when the change is
-** made, or
-** The change is made by an SQL trigger or foreign key action
-** instead of directly as a result of a users SQL statement.
-**
-**
-** If a single row is affected by more than one operation within a session,
-** then the change is considered indirect if all operations meet the criteria
-** for an indirect change above, or direct otherwise.
-**
-** This function is used to set, clear or query the session object indirect
-** flag. If the second argument passed to this function is zero, then the
-** indirect flag is cleared. If it is greater than zero, the indirect flag
-** is set. Passing a value less than zero does not modify the current value
-** of the indirect flag, and may be used to query the current state of the
-** indirect flag for the specified session object.
-**
-** The return value indicates the final state of the indirect flag: 0 if
-** it is clear, or 1 if it is set.
-*/
-int sqlite3session_indirect(sqlite3_session *pSession, int bIndirect);
-
-/*
-** CAPI3REF: Attach A Table To A Session Object
-**
-** If argument zTab is not NULL, then it is the name of a table to attach
-** to the session object passed as the first argument. All subsequent changes
-** made to the table while the session object is enabled will be recorded. See
-** documentation for [sqlite3session_changeset()] for further details.
-**
-** Or, if argument zTab is NULL, then changes are recorded for all tables
-** in the database. If additional tables are added to the database (by
-** executing "CREATE TABLE" statements) after this call is made, changes for
-** the new tables are also recorded.
-**
-** Changes can only be recorded for tables that have a PRIMARY KEY explicitly
-** defined as part of their CREATE TABLE statement. It does not matter if the
-** PRIMARY KEY is an "INTEGER PRIMARY KEY" (rowid alias) or not. The PRIMARY
-** KEY may consist of a single column, or may be a composite key.
-**
-** It is not an error if the named table does not exist in the database. Nor
-** is it an error if the named table does not have a PRIMARY KEY. However,
-** no changes will be recorded in either of these scenarios.
-**
-** Changes are not recorded for individual rows that have NULL values stored
-** in one or more of their PRIMARY KEY columns.
-**
-** SQLITE_OK is returned if the call completes without error. Or, if an error
-** occurs, an SQLite error code (e.g. SQLITE_NOMEM) is returned.
-*/
-int sqlite3session_attach(
- sqlite3_session *pSession, /* Session object */
- const char *zTab /* Table name */
-);
-
-/*
-** CAPI3REF: Set a table filter on a Session Object.
-**
-** The second argument (xFilter) is the "filter callback". For changes to rows
-** in tables that are not attached to the Session oject, the filter is called
-** to determine whether changes to the table's rows should be tracked or not.
-** If xFilter returns 0, changes is not tracked. Note that once a table is
-** attached, xFilter will not be called again.
-*/
-void sqlite3session_table_filter(
- sqlite3_session *pSession, /* Session object */
- int(*xFilter)(
- void *pCtx, /* Copy of third arg to _filter_table() */
- const char *zTab /* Table name */
- ),
- void *pCtx /* First argument passed to xFilter */
-);
-
-/*
-** CAPI3REF: Generate A Changeset From A Session Object
-**
-** Obtain a changeset containing changes to the tables attached to the
-** session object passed as the first argument. If successful,
-** set *ppChangeset to point to a buffer containing the changeset
-** and *pnChangeset to the size of the changeset in bytes before returning
-** SQLITE_OK. If an error occurs, set both *ppChangeset and *pnChangeset to
-** zero and return an SQLite error code.
-**
-** A changeset consists of zero or more INSERT, UPDATE and/or DELETE changes,
-** each representing a change to a single row of an attached table. An INSERT
-** change contains the values of each field of a new database row. A DELETE
-** contains the original values of each field of a deleted database row. An
-** UPDATE change contains the original values of each field of an updated
-** database row along with the updated values for each updated non-primary-key
-** column. It is not possible for an UPDATE change to represent a change that
-** modifies the values of primary key columns. If such a change is made, it
-** is represented in a changeset as a DELETE followed by an INSERT.
-**
-** Changes are not recorded for rows that have NULL values stored in one or
-** more of their PRIMARY KEY columns. If such a row is inserted or deleted,
-** no corresponding change is present in the changesets returned by this
-** function. If an existing row with one or more NULL values stored in
-** PRIMARY KEY columns is updated so that all PRIMARY KEY columns are non-NULL,
-** only an INSERT is appears in the changeset. Similarly, if an existing row
-** with non-NULL PRIMARY KEY values is updated so that one or more of its
-** PRIMARY KEY columns are set to NULL, the resulting changeset contains a
-** DELETE change only.
-**
-** The contents of a changeset may be traversed using an iterator created
-** using the [sqlite3changeset_start()] API. A changeset may be applied to
-** a database with a compatible schema using the [sqlite3changeset_apply()]
-** API.
-**
-** Within a changeset generated by this function, all changes related to a
-** single table are grouped together. In other words, when iterating through
-** a changeset or when applying a changeset to a database, all changes related
-** to a single table are processed before moving on to the next table. Tables
-** are sorted in the same order in which they were attached (or auto-attached)
-** to the sqlite3_session object. The order in which the changes related to
-** a single table are stored is undefined.
-**
-** Following a successful call to this function, it is the responsibility of
-** the caller to eventually free the buffer that *ppChangeset points to using
-** [sqlite3_free()].
-**
-** Changeset Generation
-**
-** Once a table has been attached to a session object, the session object
-** records the primary key values of all new rows inserted into the table.
-** It also records the original primary key and other column values of any
-** deleted or updated rows. For each unique primary key value, data is only
-** recorded once - the first time a row with said primary key is inserted,
-** updated or deleted in the lifetime of the session.
-**
-** There is one exception to the previous paragraph: when a row is inserted,
-** updated or deleted, if one or more of its primary key columns contain a
-** NULL value, no record of the change is made.
-**
-** The session object therefore accumulates two types of records - those
-** that consist of primary key values only (created when the user inserts
-** a new record) and those that consist of the primary key values and the
-** original values of other table columns (created when the users deletes
-** or updates a record).
-**
-** When this function is called, the requested changeset is created using
-** both the accumulated records and the current contents of the database
-** file. Specifically:
-**
-**
-** For each record generated by an insert, the database is queried
-** for a row with a matching primary key. If one is found, an INSERT
-** change is added to the changeset. If no such row is found, no change
-** is added to the changeset.
-**
-** For each record generated by an update or delete, the database is
-** queried for a row with a matching primary key. If such a row is
-** found and one or more of the non-primary key fields have been
-** modified from their original values, an UPDATE change is added to
-** the changeset. Or, if no such row is found in the table, a DELETE
-** change is added to the changeset. If there is a row with a matching
-** primary key in the database, but all fields contain their original
-** values, no change is added to the changeset.
-**
-**
-** This means, amongst other things, that if a row is inserted and then later
-** deleted while a session object is active, neither the insert nor the delete
-** will be present in the changeset. Or if a row is deleted and then later a
-** row with the same primary key values inserted while a session object is
-** active, the resulting changeset will contain an UPDATE change instead of
-** a DELETE and an INSERT.
-**
-** When a session object is disabled (see the [sqlite3session_enable()] API),
-** it does not accumulate records when rows are inserted, updated or deleted.
-** This may appear to have some counter-intuitive effects if a single row
-** is written to more than once during a session. For example, if a row
-** is inserted while a session object is enabled, then later deleted while
-** the same session object is disabled, no INSERT record will appear in the
-** changeset, even though the delete took place while the session was disabled.
-** Or, if one field of a row is updated while a session is disabled, and
-** another field of the same row is updated while the session is enabled, the
-** resulting changeset will contain an UPDATE change that updates both fields.
-*/
-int sqlite3session_changeset(
- sqlite3_session *pSession, /* Session object */
- int *pnChangeset, /* OUT: Size of buffer at *ppChangeset */
- void **ppChangeset /* OUT: Buffer containing changeset */
-);
-
-/*
-** CAPI3REF: Load The Difference Between Tables Into A Session
-**
-** If it is not already attached to the session object passed as the first
-** argument, this function attaches table zTbl in the same manner as the
-** [sqlite3session_attach()] function. If zTbl does not exist, or if it
-** does not have a primary key, this function is a no-op (but does not return
-** an error).
-**
-** Argument zFromDb must be the name of a database ("main", "temp" etc.)
-** attached to the same database handle as the session object that contains
-** a table compatible with the table attached to the session by this function.
-** A table is considered compatible if it:
-**
-**
-** Has the same name,
-** Has the same set of columns declared in the same order, and
-** Has the same PRIMARY KEY definition.
-**
-**
-** If the tables are not compatible, SQLITE_SCHEMA is returned. If the tables
-** are compatible but do not have any PRIMARY KEY columns, it is not an error
-** but no changes are added to the session object. As with other session
-** APIs, tables without PRIMARY KEYs are simply ignored.
-**
-** This function adds a set of changes to the session object that could be
-** used to update the table in database zFrom (call this the "from-table")
-** so that its content is the same as the table attached to the session
-** object (call this the "to-table"). Specifically:
-**
-**
-** For each row (primary key) that exists in the to-table but not in
-** the from-table, an INSERT record is added to the session object.
-**
-** For each row (primary key) that exists in the to-table but not in
-** the from-table, a DELETE record is added to the session object.
-**
-** For each row (primary key) that exists in both tables, but features
-** different in each, an UPDATE record is added to the session.
-**
-**
-** To clarify, if this function is called and then a changeset constructed
-** using [sqlite3session_changeset()], then after applying that changeset to
-** database zFrom the contents of the two compatible tables would be
-** identical.
-**
-** It an error if database zFrom does not exist or does not contain the
-** required compatible table.
-**
-** If the operation successful, SQLITE_OK is returned. Otherwise, an SQLite
-** error code. In this case, if argument pzErrMsg is not NULL, *pzErrMsg
-** may be set to point to a buffer containing an English language error
-** message. It is the responsibility of the caller to free this buffer using
-** sqlite3_free().
-*/
-int sqlite3session_diff(
- sqlite3_session *pSession,
- const char *zFromDb,
- const char *zTbl,
- char **pzErrMsg
-);
-
-
-/*
-** CAPI3REF: Generate A Patchset From A Session Object
-**
-** The differences between a patchset and a changeset are that:
-**
-**
-** DELETE records consist of the primary key fields only. The
-** original values of other fields are omitted.
-** The original values of any modified fields are omitted from
-** UPDATE records.
-**
-**
-** A patchset blob may be used with up to date versions of all
-** sqlite3changeset_xxx API functions except for sqlite3changeset_invert(),
-** which returns SQLITE_CORRUPT if it is passed a patchset. Similarly,
-** attempting to use a patchset blob with old versions of the
-** sqlite3changeset_xxx APIs also provokes an SQLITE_CORRUPT error.
-**
-** Because the non-primary key "old.*" fields are omitted, no
-** SQLITE_CHANGESET_DATA conflicts can be detected or reported if a patchset
-** is passed to the sqlite3changeset_apply() API. Other conflict types work
-** in the same way as for changesets.
-**
-** Changes within a patchset are ordered in the same way as for changesets
-** generated by the sqlite3session_changeset() function (i.e. all changes for
-** a single table are grouped together, tables appear in the order in which
-** they were attached to the session object).
-*/
-int sqlite3session_patchset(
- sqlite3_session *pSession, /* Session object */
- int *pnPatchset, /* OUT: Size of buffer at *ppChangeset */
- void **ppPatchset /* OUT: Buffer containing changeset */
-);
-
-/*
-** CAPI3REF: Test if a changeset has recorded any changes.
-**
-** Return non-zero if no changes to attached tables have been recorded by
-** the session object passed as the first argument. Otherwise, if one or
-** more changes have been recorded, return zero.
-**
-** Even if this function returns zero, it is possible that calling
-** [sqlite3session_changeset()] on the session handle may still return a
-** changeset that contains no changes. This can happen when a row in
-** an attached table is modified and then later on the original values
-** are restored. However, if this function returns non-zero, then it is
-** guaranteed that a call to sqlite3session_changeset() will return a
-** changeset containing zero changes.
-*/
-int sqlite3session_isempty(sqlite3_session *pSession);
-
-/*
-** CAPI3REF: Create An Iterator To Traverse A Changeset
-**
-** Create an iterator used to iterate through the contents of a changeset.
-** If successful, *pp is set to point to the iterator handle and SQLITE_OK
-** is returned. Otherwise, if an error occurs, *pp is set to zero and an
-** SQLite error code is returned.
-**
-** The following functions can be used to advance and query a changeset
-** iterator created by this function:
-**
-**
-** [sqlite3changeset_next()]
-** [sqlite3changeset_op()]
-** [sqlite3changeset_new()]
-** [sqlite3changeset_old()]
-**
-**
-** It is the responsibility of the caller to eventually destroy the iterator
-** by passing it to [sqlite3changeset_finalize()]. The buffer containing the
-** changeset (pChangeset) must remain valid until after the iterator is
-** destroyed.
-**
-** Assuming the changeset blob was created by one of the
-** [sqlite3session_changeset()], [sqlite3changeset_concat()] or
-** [sqlite3changeset_invert()] functions, all changes within the changeset
-** that apply to a single table are grouped together. This means that when
-** an application iterates through a changeset using an iterator created by
-** this function, all changes that relate to a single table are visted
-** consecutively. There is no chance that the iterator will visit a change
-** the applies to table X, then one for table Y, and then later on visit
-** another change for table X.
-*/
-int sqlite3changeset_start(
- sqlite3_changeset_iter **pp, /* OUT: New changeset iterator handle */
- int nChangeset, /* Size of changeset blob in bytes */
- void *pChangeset /* Pointer to blob containing changeset */
-);
-
-
-/*
-** CAPI3REF: Advance A Changeset Iterator
-**
-** This function may only be used with iterators created by function
-** [sqlite3changeset_start()]. If it is called on an iterator passed to
-** a conflict-handler callback by [sqlite3changeset_apply()], SQLITE_MISUSE
-** is returned and the call has no effect.
-**
-** Immediately after an iterator is created by sqlite3changeset_start(), it
-** does not point to any change in the changeset. Assuming the changeset
-** is not empty, the first call to this function advances the iterator to
-** point to the first change in the changeset. Each subsequent call advances
-** the iterator to point to the next change in the changeset (if any). If
-** no error occurs and the iterator points to a valid change after a call
-** to sqlite3changeset_next() has advanced it, SQLITE_ROW is returned.
-** Otherwise, if all changes in the changeset have already been visited,
-** SQLITE_DONE is returned.
-**
-** If an error occurs, an SQLite error code is returned. Possible error
-** codes include SQLITE_CORRUPT (if the changeset buffer is corrupt) or
-** SQLITE_NOMEM.
-*/
-int sqlite3changeset_next(sqlite3_changeset_iter *pIter);
-
-/*
-** CAPI3REF: Obtain The Current Operation From A Changeset Iterator
-**
-** The pIter argument passed to this function may either be an iterator
-** passed to a conflict-handler by [sqlite3changeset_apply()], or an iterator
-** created by [sqlite3changeset_start()]. In the latter case, the most recent
-** call to [sqlite3changeset_next()] must have returned [SQLITE_ROW]. If this
-** is not the case, this function returns [SQLITE_MISUSE].
-**
-** If argument pzTab is not NULL, then *pzTab is set to point to a
-** nul-terminated utf-8 encoded string containing the name of the table
-** affected by the current change. The buffer remains valid until either
-** sqlite3changeset_next() is called on the iterator or until the
-** conflict-handler function returns. If pnCol is not NULL, then *pnCol is
-** set to the number of columns in the table affected by the change. If
-** pbIncorrect is not NULL, then *pbIndirect is set to true (1) if the change
-** is an indirect change, or false (0) otherwise. See the documentation for
-** [sqlite3session_indirect()] for a description of direct and indirect
-** changes. Finally, if pOp is not NULL, then *pOp is set to one of
-** [SQLITE_INSERT], [SQLITE_DELETE] or [SQLITE_UPDATE], depending on the
-** type of change that the iterator currently points to.
-**
-** If no error occurs, SQLITE_OK is returned. If an error does occur, an
-** SQLite error code is returned. The values of the output variables may not
-** be trusted in this case.
-*/
-int sqlite3changeset_op(
- sqlite3_changeset_iter *pIter, /* Iterator object */
- const char **pzTab, /* OUT: Pointer to table name */
- int *pnCol, /* OUT: Number of columns in table */
- int *pOp, /* OUT: SQLITE_INSERT, DELETE or UPDATE */
- int *pbIndirect /* OUT: True for an 'indirect' change */
-);
-
-/*
-** CAPI3REF: Obtain The Primary Key Definition Of A Table
-**
-** For each modified table, a changeset includes the following:
-**
-**
-** The number of columns in the table, and
-** Which of those columns make up the tables PRIMARY KEY.
-**
-**
-** This function is used to find which columns comprise the PRIMARY KEY of
-** the table modified by the change that iterator pIter currently points to.
-** If successful, *pabPK is set to point to an array of nCol entries, where
-** nCol is the number of columns in the table. Elements of *pabPK are set to
-** 0x01 if the corresponding column is part of the tables primary key, or
-** 0x00 if it is not.
-**
-** If argumet pnCol is not NULL, then *pnCol is set to the number of columns
-** in the table.
-**
-** If this function is called when the iterator does not point to a valid
-** entry, SQLITE_MISUSE is returned and the output variables zeroed. Otherwise,
-** SQLITE_OK is returned and the output variables populated as described
-** above.
-*/
-int sqlite3changeset_pk(
- sqlite3_changeset_iter *pIter, /* Iterator object */
- unsigned char **pabPK, /* OUT: Array of boolean - true for PK cols */
- int *pnCol /* OUT: Number of entries in output array */
-);
-
-/*
-** CAPI3REF: Obtain old.* Values From A Changeset Iterator
-**
-** The pIter argument passed to this function may either be an iterator
-** passed to a conflict-handler by [sqlite3changeset_apply()], or an iterator
-** created by [sqlite3changeset_start()]. In the latter case, the most recent
-** call to [sqlite3changeset_next()] must have returned SQLITE_ROW.
-** Furthermore, it may only be called if the type of change that the iterator
-** currently points to is either [SQLITE_DELETE] or [SQLITE_UPDATE]. Otherwise,
-** this function returns [SQLITE_MISUSE] and sets *ppValue to NULL.
-**
-** Argument iVal must be greater than or equal to 0, and less than the number
-** of columns in the table affected by the current change. Otherwise,
-** [SQLITE_RANGE] is returned and *ppValue is set to NULL.
-**
-** If successful, this function sets *ppValue to point to a protected
-** sqlite3_value object containing the iVal'th value from the vector of
-** original row values stored as part of the UPDATE or DELETE change and
-** returns SQLITE_OK. The name of the function comes from the fact that this
-** is similar to the "old.*" columns available to update or delete triggers.
-**
-** If some other error occurs (e.g. an OOM condition), an SQLite error code
-** is returned and *ppValue is set to NULL.
-*/
-int sqlite3changeset_old(
- sqlite3_changeset_iter *pIter, /* Changeset iterator */
- int iVal, /* Column number */
- sqlite3_value **ppValue /* OUT: Old value (or NULL pointer) */
-);
-
-/*
-** CAPI3REF: Obtain new.* Values From A Changeset Iterator
-**
-** The pIter argument passed to this function may either be an iterator
-** passed to a conflict-handler by [sqlite3changeset_apply()], or an iterator
-** created by [sqlite3changeset_start()]. In the latter case, the most recent
-** call to [sqlite3changeset_next()] must have returned SQLITE_ROW.
-** Furthermore, it may only be called if the type of change that the iterator
-** currently points to is either [SQLITE_UPDATE] or [SQLITE_INSERT]. Otherwise,
-** this function returns [SQLITE_MISUSE] and sets *ppValue to NULL.
-**
-** Argument iVal must be greater than or equal to 0, and less than the number
-** of columns in the table affected by the current change. Otherwise,
-** [SQLITE_RANGE] is returned and *ppValue is set to NULL.
-**
-** If successful, this function sets *ppValue to point to a protected
-** sqlite3_value object containing the iVal'th value from the vector of
-** new row values stored as part of the UPDATE or INSERT change and
-** returns SQLITE_OK. If the change is an UPDATE and does not include
-** a new value for the requested column, *ppValue is set to NULL and
-** SQLITE_OK returned. The name of the function comes from the fact that
-** this is similar to the "new.*" columns available to update or delete
-** triggers.
-**
-** If some other error occurs (e.g. an OOM condition), an SQLite error code
-** is returned and *ppValue is set to NULL.
-*/
-int sqlite3changeset_new(
- sqlite3_changeset_iter *pIter, /* Changeset iterator */
- int iVal, /* Column number */
- sqlite3_value **ppValue /* OUT: New value (or NULL pointer) */
-);
-
-/*
-** CAPI3REF: Obtain Conflicting Row Values From A Changeset Iterator
-**
-** This function should only be used with iterator objects passed to a
-** conflict-handler callback by [sqlite3changeset_apply()] with either
-** [SQLITE_CHANGESET_DATA] or [SQLITE_CHANGESET_CONFLICT]. If this function
-** is called on any other iterator, [SQLITE_MISUSE] is returned and *ppValue
-** is set to NULL.
-**
-** Argument iVal must be greater than or equal to 0, and less than the number
-** of columns in the table affected by the current change. Otherwise,
-** [SQLITE_RANGE] is returned and *ppValue is set to NULL.
-**
-** If successful, this function sets *ppValue to point to a protected
-** sqlite3_value object containing the iVal'th value from the
-** "conflicting row" associated with the current conflict-handler callback
-** and returns SQLITE_OK.
-**
-** If some other error occurs (e.g. an OOM condition), an SQLite error code
-** is returned and *ppValue is set to NULL.
-*/
-int sqlite3changeset_conflict(
- sqlite3_changeset_iter *pIter, /* Changeset iterator */
- int iVal, /* Column number */
- sqlite3_value **ppValue /* OUT: Value from conflicting row */
-);
-
-/*
-** CAPI3REF: Determine The Number Of Foreign Key Constraint Violations
-**
-** This function may only be called with an iterator passed to an
-** SQLITE_CHANGESET_FOREIGN_KEY conflict handler callback. In this case
-** it sets the output variable to the total number of known foreign key
-** violations in the destination database and returns SQLITE_OK.
-**
-** In all other cases this function returns SQLITE_MISUSE.
-*/
-int sqlite3changeset_fk_conflicts(
- sqlite3_changeset_iter *pIter, /* Changeset iterator */
- int *pnOut /* OUT: Number of FK violations */
-);
-
-
-/*
-** CAPI3REF: Finalize A Changeset Iterator
-**
-** This function is used to finalize an iterator allocated with
-** [sqlite3changeset_start()].
-**
-** This function should only be called on iterators created using the
-** [sqlite3changeset_start()] function. If an application calls this
-** function with an iterator passed to a conflict-handler by
-** [sqlite3changeset_apply()], [SQLITE_MISUSE] is immediately returned and the
-** call has no effect.
-**
-** If an error was encountered within a call to an sqlite3changeset_xxx()
-** function (for example an [SQLITE_CORRUPT] in [sqlite3changeset_next()] or an
-** [SQLITE_NOMEM] in [sqlite3changeset_new()]) then an error code corresponding
-** to that error is returned by this function. Otherwise, SQLITE_OK is
-** returned. This is to allow the following pattern (pseudo-code):
-**
-** sqlite3changeset_start();
-** while( SQLITE_ROW==sqlite3changeset_next() ){
-** // Do something with change.
-** }
-** rc = sqlite3changeset_finalize();
-** if( rc!=SQLITE_OK ){
-** // An error has occurred
-** }
-*/
-int sqlite3changeset_finalize(sqlite3_changeset_iter *pIter);
-
-/*
-** CAPI3REF: Invert A Changeset
-**
-** This function is used to "invert" a changeset object. Applying an inverted
-** changeset to a database reverses the effects of applying the uninverted
-** changeset. Specifically:
-**
-**
-** Each DELETE change is changed to an INSERT, and
-** Each INSERT change is changed to a DELETE, and
-** For each UPDATE change, the old.* and new.* values are exchanged.
-**
-**
-** This function does not change the order in which changes appear within
-** the changeset. It merely reverses the sense of each individual change.
-**
-** If successful, a pointer to a buffer containing the inverted changeset
-** is stored in *ppOut, the size of the same buffer is stored in *pnOut, and
-** SQLITE_OK is returned. If an error occurs, both *pnOut and *ppOut are
-** zeroed and an SQLite error code returned.
-**
-** It is the responsibility of the caller to eventually call sqlite3_free()
-** on the *ppOut pointer to free the buffer allocation following a successful
-** call to this function.
-**
-** WARNING/TODO: This function currently assumes that the input is a valid
-** changeset. If it is not, the results are undefined.
-*/
-int sqlite3changeset_invert(
- int nIn, const void *pIn, /* Input changeset */
- int *pnOut, void **ppOut /* OUT: Inverse of input */
-);
-
-/*
-** CAPI3REF: Concatenate Two Changeset Objects
-**
-** This function is used to concatenate two changesets, A and B, into a
-** single changeset. The result is a changeset equivalent to applying
-** changeset A followed by changeset B.
-**
-** This function combines the two input changesets using an
-** sqlite3_changegroup object. Calling it produces similar results as the
-** following code fragment:
-**
-** sqlite3_changegroup *pGrp;
-** rc = sqlite3_changegroup_new(&pGrp);
-** if( rc==SQLITE_OK ) rc = sqlite3changegroup_add(pGrp, nA, pA);
-** if( rc==SQLITE_OK ) rc = sqlite3changegroup_add(pGrp, nB, pB);
-** if( rc==SQLITE_OK ){
-** rc = sqlite3changegroup_output(pGrp, pnOut, ppOut);
-** }else{
-** *ppOut = 0;
-** *pnOut = 0;
-** }
-**
-** Refer to the sqlite3_changegroup documentation below for details.
-*/
-int sqlite3changeset_concat(
- int nA, /* Number of bytes in buffer pA */
- void *pA, /* Pointer to buffer containing changeset A */
- int nB, /* Number of bytes in buffer pB */
- void *pB, /* Pointer to buffer containing changeset B */
- int *pnOut, /* OUT: Number of bytes in output changeset */
- void **ppOut /* OUT: Buffer containing output changeset */
-);
-
-
-/*
-** Changegroup handle.
-*/
-typedef struct sqlite3_changegroup sqlite3_changegroup;
-
-/*
-** CAPI3REF: Combine two or more changesets into a single changeset.
-**
-** An sqlite3_changegroup object is used to combine two or more changesets
-** (or patchsets) into a single changeset (or patchset). A single changegroup
-** object may combine changesets or patchsets, but not both. The output is
-** always in the same format as the input.
-**
-** If successful, this function returns SQLITE_OK and populates (*pp) with
-** a pointer to a new sqlite3_changegroup object before returning. The caller
-** should eventually free the returned object using a call to
-** sqlite3changegroup_delete(). If an error occurs, an SQLite error code
-** (i.e. SQLITE_NOMEM) is returned and *pp is set to NULL.
-**
-** The usual usage pattern for an sqlite3_changegroup object is as follows:
-**
-**
-** It is created using a call to sqlite3changegroup_new().
-**
-** Zero or more changesets (or patchsets) are added to the object
-** by calling sqlite3changegroup_add().
-**
-** The result of combining all input changesets together is obtained
-** by the application via a call to sqlite3changegroup_output().
-**
-** The object is deleted using a call to sqlite3changegroup_delete().
-**
-**
-** Any number of calls to add() and output() may be made between the calls to
-** new() and delete(), and in any order.
-**
-** As well as the regular sqlite3changegroup_add() and
-** sqlite3changegroup_output() functions, also available are the streaming
-** versions sqlite3changegroup_add_strm() and sqlite3changegroup_output_strm().
-*/
-int sqlite3changegroup_new(sqlite3_changegroup **pp);
-
-/*
-** Add all changes within the changeset (or patchset) in buffer pData (size
-** nData bytes) to the changegroup.
-**
-** If the buffer contains a patchset, then all prior calls to this function
-** on the same changegroup object must also have specified patchsets. Or, if
-** the buffer contains a changeset, so must have the earlier calls to this
-** function. Otherwise, SQLITE_ERROR is returned and no changes are added
-** to the changegroup.
-**
-** Rows within the changeset and changegroup are identified by the values in
-** their PRIMARY KEY columns. A change in the changeset is considered to
-** apply to the same row as a change already present in the changegroup if
-** the two rows have the same primary key.
-**
-** Changes to rows that that do not already appear in the changegroup are
-** simply copied into it. Or, if both the new changeset and the changegroup
-** contain changes that apply to a single row, the final contents of the
-** changegroup depends on the type of each change, as follows:
-**
-**
-** Existing Change
-** New Change
-** Output Change
-** INSERT INSERT
-** The new change is ignored. This case does not occur if the new
-** changeset was recorded immediately after the changesets already
-** added to the changegroup.
-** INSERT UPDATE
-** The INSERT change remains in the changegroup. The values in the
-** INSERT change are modified as if the row was inserted by the
-** existing change and then updated according to the new change.
-** INSERT DELETE
-** The existing INSERT is removed from the changegroup. The DELETE is
-** not added.
-** UPDATE INSERT
-** The new change is ignored. This case does not occur if the new
-** changeset was recorded immediately after the changesets already
-** added to the changegroup.
-** UPDATE UPDATE
-** The existing UPDATE remains within the changegroup. It is amended
-** so that the accompanying values are as if the row was updated once
-** by the existing change and then again by the new change.
-** UPDATE DELETE
-** The existing UPDATE is replaced by the new DELETE within the
-** changegroup.
-** DELETE INSERT
-** If one or more of the column values in the row inserted by the
-** new change differ from those in the row deleted by the existing
-** change, the existing DELETE is replaced by an UPDATE within the
-** changegroup. Otherwise, if the inserted row is exactly the same
-** as the deleted row, the existing DELETE is simply discarded.
-** DELETE UPDATE
-** The new change is ignored. This case does not occur if the new
-** changeset was recorded immediately after the changesets already
-** added to the changegroup.
-** DELETE DELETE
-** The new change is ignored. This case does not occur if the new
-** changeset was recorded immediately after the changesets already
-** added to the changegroup.
-**
-**
-** If the new changeset contains changes to a table that is already present
-** in the changegroup, then the number of columns and the position of the
-** primary key columns for the table must be consistent. If this is not the
-** case, this function fails with SQLITE_SCHEMA. If the input changeset
-** appears to be corrupt and the corruption is detected, SQLITE_CORRUPT is
-** returned. Or, if an out-of-memory condition occurs during processing, this
-** function returns SQLITE_NOMEM. In all cases, if an error occurs the
-** final contents of the changegroup is undefined.
-**
-** If no error occurs, SQLITE_OK is returned.
-*/
-int sqlite3changegroup_add(sqlite3_changegroup*, int nData, void *pData);
-
-/*
-** Obtain a buffer containing a changeset (or patchset) representing the
-** current contents of the changegroup. If the inputs to the changegroup
-** were themselves changesets, the output is a changeset. Or, if the
-** inputs were patchsets, the output is also a patchset.
-**
-** As with the output of the sqlite3session_changeset() and
-** sqlite3session_patchset() functions, all changes related to a single
-** table are grouped together in the output of this function. Tables appear
-** in the same order as for the very first changeset added to the changegroup.
-** If the second or subsequent changesets added to the changegroup contain
-** changes for tables that do not appear in the first changeset, they are
-** appended onto the end of the output changeset, again in the order in
-** which they are first encountered.
-**
-** If an error occurs, an SQLite error code is returned and the output
-** variables (*pnData) and (*ppData) are set to 0. Otherwise, SQLITE_OK
-** is returned and the output variables are set to the size of and a
-** pointer to the output buffer, respectively. In this case it is the
-** responsibility of the caller to eventually free the buffer using a
-** call to sqlite3_free().
-*/
-int sqlite3changegroup_output(
- sqlite3_changegroup*,
- int *pnData, /* OUT: Size of output buffer in bytes */
- void **ppData /* OUT: Pointer to output buffer */
-);
-
-/*
-** Delete a changegroup object.
-*/
-void sqlite3changegroup_delete(sqlite3_changegroup*);
-
-/*
-** CAPI3REF: Apply A Changeset To A Database
-**
-** Apply a changeset to a database. This function attempts to update the
-** "main" database attached to handle db with the changes found in the
-** changeset passed via the second and third arguments.
-**
-** The fourth argument (xFilter) passed to this function is the "filter
-** callback". If it is not NULL, then for each table affected by at least one
-** change in the changeset, the filter callback is invoked with
-** the table name as the second argument, and a copy of the context pointer
-** passed as the sixth argument to this function as the first. If the "filter
-** callback" returns zero, then no attempt is made to apply any changes to
-** the table. Otherwise, if the return value is non-zero or the xFilter
-** argument to this function is NULL, all changes related to the table are
-** attempted.
-**
-** For each table that is not excluded by the filter callback, this function
-** tests that the target database contains a compatible table. A table is
-** considered compatible if all of the following are true:
-**
-**
-** The table has the same name as the name recorded in the
-** changeset, and
-** The table has the same number of columns as recorded in the
-** changeset, and
-** The table has primary key columns in the same position as
-** recorded in the changeset.
-**
-**
-** If there is no compatible table, it is not an error, but none of the
-** changes associated with the table are applied. A warning message is issued
-** via the sqlite3_log() mechanism with the error code SQLITE_SCHEMA. At most
-** one such warning is issued for each table in the changeset.
-**
-** For each change for which there is a compatible table, an attempt is made
-** to modify the table contents according to the UPDATE, INSERT or DELETE
-** change. If a change cannot be applied cleanly, the conflict handler
-** function passed as the fifth argument to sqlite3changeset_apply() may be
-** invoked. A description of exactly when the conflict handler is invoked for
-** each type of change is below.
-**
-** Unlike the xFilter argument, xConflict may not be passed NULL. The results
-** of passing anything other than a valid function pointer as the xConflict
-** argument are undefined.
-**
-** Each time the conflict handler function is invoked, it must return one
-** of [SQLITE_CHANGESET_OMIT], [SQLITE_CHANGESET_ABORT] or
-** [SQLITE_CHANGESET_REPLACE]. SQLITE_CHANGESET_REPLACE may only be returned
-** if the second argument passed to the conflict handler is either
-** SQLITE_CHANGESET_DATA or SQLITE_CHANGESET_CONFLICT. If the conflict-handler
-** returns an illegal value, any changes already made are rolled back and
-** the call to sqlite3changeset_apply() returns SQLITE_MISUSE. Different
-** actions are taken by sqlite3changeset_apply() depending on the value
-** returned by each invocation of the conflict-handler function. Refer to
-** the documentation for the three
-** [SQLITE_CHANGESET_OMIT|available return values] for details.
-**
-**
-** DELETE Changes
-** For each DELETE change, this function checks if the target database
-** contains a row with the same primary key value (or values) as the
-** original row values stored in the changeset. If it does, and the values
-** stored in all non-primary key columns also match the values stored in
-** the changeset the row is deleted from the target database.
-**
-** If a row with matching primary key values is found, but one or more of
-** the non-primary key fields contains a value different from the original
-** row value stored in the changeset, the conflict-handler function is
-** invoked with [SQLITE_CHANGESET_DATA] as the second argument.
-**
-** If no row with matching primary key values is found in the database,
-** the conflict-handler function is invoked with [SQLITE_CHANGESET_NOTFOUND]
-** passed as the second argument.
-**
-** If the DELETE operation is attempted, but SQLite returns SQLITE_CONSTRAINT
-** (which can only happen if a foreign key constraint is violated), the
-** conflict-handler function is invoked with [SQLITE_CHANGESET_CONSTRAINT]
-** passed as the second argument. This includes the case where the DELETE
-** operation is attempted because an earlier call to the conflict handler
-** function returned [SQLITE_CHANGESET_REPLACE].
-**
-** INSERT Changes
-** For each INSERT change, an attempt is made to insert the new row into
-** the database.
-**
-** If the attempt to insert the row fails because the database already
-** contains a row with the same primary key values, the conflict handler
-** function is invoked with the second argument set to
-** [SQLITE_CHANGESET_CONFLICT].
-**
-** If the attempt to insert the row fails because of some other constraint
-** violation (e.g. NOT NULL or UNIQUE), the conflict handler function is
-** invoked with the second argument set to [SQLITE_CHANGESET_CONSTRAINT].
-** This includes the case where the INSERT operation is re-attempted because
-** an earlier call to the conflict handler function returned
-** [SQLITE_CHANGESET_REPLACE].
-**
-** UPDATE Changes
-** For each UPDATE change, this function checks if the target database
-** contains a row with the same primary key value (or values) as the
-** original row values stored in the changeset. If it does, and the values
-** stored in all non-primary key columns also match the values stored in
-** the changeset the row is updated within the target database.
-**
-** If a row with matching primary key values is found, but one or more of
-** the non-primary key fields contains a value different from an original
-** row value stored in the changeset, the conflict-handler function is
-** invoked with [SQLITE_CHANGESET_DATA] as the second argument. Since
-** UPDATE changes only contain values for non-primary key fields that are
-** to be modified, only those fields need to match the original values to
-** avoid the SQLITE_CHANGESET_DATA conflict-handler callback.
-**
-** If no row with matching primary key values is found in the database,
-** the conflict-handler function is invoked with [SQLITE_CHANGESET_NOTFOUND]
-** passed as the second argument.
-**
-** If the UPDATE operation is attempted, but SQLite returns
-** SQLITE_CONSTRAINT, the conflict-handler function is invoked with
-** [SQLITE_CHANGESET_CONSTRAINT] passed as the second argument.
-** This includes the case where the UPDATE operation is attempted after
-** an earlier call to the conflict handler function returned
-** [SQLITE_CHANGESET_REPLACE].
-**
-**
-** It is safe to execute SQL statements, including those that write to the
-** table that the callback related to, from within the xConflict callback.
-** This can be used to further customize the applications conflict
-** resolution strategy.
-**
-** All changes made by this function are enclosed in a savepoint transaction.
-** If any other error (aside from a constraint failure when attempting to
-** write to the target database) occurs, then the savepoint transaction is
-** rolled back, restoring the target database to its original state, and an
-** SQLite error code returned.
-*/
-int sqlite3changeset_apply(
- sqlite3 *db, /* Apply change to "main" db of this handle */
- int nChangeset, /* Size of changeset in bytes */
- void *pChangeset, /* Changeset blob */
- int(*xFilter)(
- void *pCtx, /* Copy of sixth arg to _apply() */
- const char *zTab /* Table name */
- ),
- int(*xConflict)(
- void *pCtx, /* Copy of sixth arg to _apply() */
- int eConflict, /* DATA, MISSING, CONFLICT, CONSTRAINT */
- sqlite3_changeset_iter *p /* Handle describing change and conflict */
- ),
- void *pCtx /* First argument passed to xConflict */
-);
-
-/*
-** CAPI3REF: Constants Passed To The Conflict Handler
-**
-** Values that may be passed as the second argument to a conflict-handler.
-**
-**
-** SQLITE_CHANGESET_DATA
-** The conflict handler is invoked with CHANGESET_DATA as the second argument
-** when processing a DELETE or UPDATE change if a row with the required
-** PRIMARY KEY fields is present in the database, but one or more other
-** (non primary-key) fields modified by the update do not contain the
-** expected "before" values.
-**
-** The conflicting row, in this case, is the database row with the matching
-** primary key.
-**
-** SQLITE_CHANGESET_NOTFOUND
-** The conflict handler is invoked with CHANGESET_NOTFOUND as the second
-** argument when processing a DELETE or UPDATE change if a row with the
-** required PRIMARY KEY fields is not present in the database.
-**
-** There is no conflicting row in this case. The results of invoking the
-** sqlite3changeset_conflict() API are undefined.
-**
-** SQLITE_CHANGESET_CONFLICT
-** CHANGESET_CONFLICT is passed as the second argument to the conflict
-** handler while processing an INSERT change if the operation would result
-** in duplicate primary key values.
-**
-** The conflicting row in this case is the database row with the matching
-** primary key.
-**
-** SQLITE_CHANGESET_FOREIGN_KEY
-** If foreign key handling is enabled, and applying a changeset leaves the
-** database in a state containing foreign key violations, the conflict
-** handler is invoked with CHANGESET_FOREIGN_KEY as the second argument
-** exactly once before the changeset is committed. If the conflict handler
-** returns CHANGESET_OMIT, the changes, including those that caused the
-** foreign key constraint violation, are committed. Or, if it returns
-** CHANGESET_ABORT, the changeset is rolled back.
-**
-** No current or conflicting row information is provided. The only function
-** it is possible to call on the supplied sqlite3_changeset_iter handle
-** is sqlite3changeset_fk_conflicts().
-**
-** SQLITE_CHANGESET_CONSTRAINT
-** If any other constraint violation occurs while applying a change (i.e.
-** a UNIQUE, CHECK or NOT NULL constraint), the conflict handler is
-** invoked with CHANGESET_CONSTRAINT as the second argument.
-**
-** There is no conflicting row in this case. The results of invoking the
-** sqlite3changeset_conflict() API are undefined.
-**
-**
-*/
-#define SQLITE_CHANGESET_DATA 1
-#define SQLITE_CHANGESET_NOTFOUND 2
-#define SQLITE_CHANGESET_CONFLICT 3
-#define SQLITE_CHANGESET_CONSTRAINT 4
-#define SQLITE_CHANGESET_FOREIGN_KEY 5
-
-/*
-** CAPI3REF: Constants Returned By The Conflict Handler
-**
-** A conflict handler callback must return one of the following three values.
-**
-**
-** SQLITE_CHANGESET_OMIT
-** If a conflict handler returns this value no special action is taken. The
-** change that caused the conflict is not applied. The session module
-** continues to the next change in the changeset.
-**
-** SQLITE_CHANGESET_REPLACE
-** This value may only be returned if the second argument to the conflict
-** handler was SQLITE_CHANGESET_DATA or SQLITE_CHANGESET_CONFLICT. If this
-** is not the case, any changes applied so far are rolled back and the
-** call to sqlite3changeset_apply() returns SQLITE_MISUSE.
-**
-** If CHANGESET_REPLACE is returned by an SQLITE_CHANGESET_DATA conflict
-** handler, then the conflicting row is either updated or deleted, depending
-** on the type of change.
-**
-** If CHANGESET_REPLACE is returned by an SQLITE_CHANGESET_CONFLICT conflict
-** handler, then the conflicting row is removed from the database and a
-** second attempt to apply the change is made. If this second attempt fails,
-** the original row is restored to the database before continuing.
-**
-** SQLITE_CHANGESET_ABORT
-** If this value is returned, any changes applied so far are rolled back
-** and the call to sqlite3changeset_apply() returns SQLITE_ABORT.
-**
-*/
-#define SQLITE_CHANGESET_OMIT 0
-#define SQLITE_CHANGESET_REPLACE 1
-#define SQLITE_CHANGESET_ABORT 2
-
-/*
-** CAPI3REF: Streaming Versions of API functions.
-**
-** The six streaming API xxx_strm() functions serve similar purposes to the
-** corresponding non-streaming API functions:
-**
-**
-** Streaming function Non-streaming equivalent
-** sqlite3changeset_apply_str [sqlite3changeset_apply]
-** sqlite3changeset_concat_str [sqlite3changeset_concat]
-** sqlite3changeset_invert_str [sqlite3changeset_invert]
-** sqlite3changeset_start_str [sqlite3changeset_start]
-** sqlite3session_changeset_str [sqlite3session_changeset]
-** sqlite3session_patchset_str [sqlite3session_patchset]
-**
-**
-** Non-streaming functions that accept changesets (or patchsets) as input
-** require that the entire changeset be stored in a single buffer in memory.
-** Similarly, those that return a changeset or patchset do so by returning
-** a pointer to a single large buffer allocated using sqlite3_malloc().
-** Normally this is convenient. However, if an application running in a
-** low-memory environment is required to handle very large changesets, the
-** large contiguous memory allocations required can become onerous.
-**
-** In order to avoid this problem, instead of a single large buffer, input
-** is passed to a streaming API functions by way of a callback function that
-** the sessions module invokes to incrementally request input data as it is
-** required. In all cases, a pair of API function parameters such as
-**
-**
-** int nChangeset,
-** void *pChangeset,
-**
-**
-** Is replaced by:
-**
-**
-** int (*xInput)(void *pIn, void *pData, int *pnData),
-** void *pIn,
-**
-**
-** Each time the xInput callback is invoked by the sessions module, the first
-** argument passed is a copy of the supplied pIn context pointer. The second
-** argument, pData, points to a buffer (*pnData) bytes in size. Assuming no
-** error occurs the xInput method should copy up to (*pnData) bytes of data
-** into the buffer and set (*pnData) to the actual number of bytes copied
-** before returning SQLITE_OK. If the input is completely exhausted, (*pnData)
-** should be set to zero to indicate this. Or, if an error occurs, an SQLite
-** error code should be returned. In all cases, if an xInput callback returns
-** an error, all processing is abandoned and the streaming API function
-** returns a copy of the error code to the caller.
-**
-** In the case of sqlite3changeset_start_strm(), the xInput callback may be
-** invoked by the sessions module at any point during the lifetime of the
-** iterator. If such an xInput callback returns an error, the iterator enters
-** an error state, whereby all subsequent calls to iterator functions
-** immediately fail with the same error code as returned by xInput.
-**
-** Similarly, streaming API functions that return changesets (or patchsets)
-** return them in chunks by way of a callback function instead of via a
-** pointer to a single large buffer. In this case, a pair of parameters such
-** as:
-**
-**
-** int *pnChangeset,
-** void **ppChangeset,
-**
-**
-** Is replaced by:
-**
-**
-** int (*xOutput)(void *pOut, const void *pData, int nData),
-** void *pOut
-**
-**
-** The xOutput callback is invoked zero or more times to return data to
-** the application. The first parameter passed to each call is a copy of the
-** pOut pointer supplied by the application. The second parameter, pData,
-** points to a buffer nData bytes in size containing the chunk of output
-** data being returned. If the xOutput callback successfully processes the
-** supplied data, it should return SQLITE_OK to indicate success. Otherwise,
-** it should return some other SQLite error code. In this case processing
-** is immediately abandoned and the streaming API function returns a copy
-** of the xOutput error code to the application.
-**
-** The sessions module never invokes an xOutput callback with the third
-** parameter set to a value less than or equal to zero. Other than this,
-** no guarantees are made as to the size of the chunks of data returned.
-*/
-int sqlite3changeset_apply_strm(
- sqlite3 *db, /* Apply change to "main" db of this handle */
- int (*xInput)(void *pIn, void *pData, int *pnData), /* Input function */
- void *pIn, /* First arg for xInput */
- int(*xFilter)(
- void *pCtx, /* Copy of sixth arg to _apply() */
- const char *zTab /* Table name */
- ),
- int(*xConflict)(
- void *pCtx, /* Copy of sixth arg to _apply() */
- int eConflict, /* DATA, MISSING, CONFLICT, CONSTRAINT */
- sqlite3_changeset_iter *p /* Handle describing change and conflict */
- ),
- void *pCtx /* First argument passed to xConflict */
-);
-int sqlite3changeset_concat_strm(
- int (*xInputA)(void *pIn, void *pData, int *pnData),
- void *pInA,
- int (*xInputB)(void *pIn, void *pData, int *pnData),
- void *pInB,
- int (*xOutput)(void *pOut, const void *pData, int nData),
- void *pOut
-);
-int sqlite3changeset_invert_strm(
- int (*xInput)(void *pIn, void *pData, int *pnData),
- void *pIn,
- int (*xOutput)(void *pOut, const void *pData, int nData),
- void *pOut
-);
-int sqlite3changeset_start_strm(
- sqlite3_changeset_iter **pp,
- int (*xInput)(void *pIn, void *pData, int *pnData),
- void *pIn
-);
-int sqlite3session_changeset_strm(
- sqlite3_session *pSession,
- int (*xOutput)(void *pOut, const void *pData, int nData),
- void *pOut
-);
-int sqlite3session_patchset_strm(
- sqlite3_session *pSession,
- int (*xOutput)(void *pOut, const void *pData, int nData),
- void *pOut
-);
-int sqlite3changegroup_add_strm(sqlite3_changegroup*,
- int (*xInput)(void *pIn, void *pData, int *pnData),
- void *pIn
-);
-int sqlite3changegroup_output_strm(sqlite3_changegroup*,
- int (*xOutput)(void *pOut, const void *pData, int nData),
- void *pOut
-);
-
-
-/*
-** Make sure we can call this stuff from C++.
-*/
-#if 0
-}
-#endif
-
-#endif /* !defined(__SQLITESESSION_H_) && defined(SQLITE_ENABLE_SESSION) */
-
-/******** End of sqlite3session.h *********/
-/******** Begin file fts5.h *********/
-/*
-** 2014 May 31
-**
-** The author disclaims copyright to this source code. In place of
-** a legal notice, here is a blessing:
-**
-** May you do good and not evil.
-** May you find forgiveness for yourself and forgive others.
-** May you share freely, never taking more than you give.
-**
-******************************************************************************
-**
-** Interfaces to extend FTS5. Using the interfaces defined in this file,
-** FTS5 may be extended with:
-**
-** * custom tokenizers, and
-** * custom auxiliary functions.
-*/
-
-
-#ifndef _FTS5_H
-#define _FTS5_H
-
-
-#if 0
-extern "C" {
-#endif
-
-/*************************************************************************
-** CUSTOM AUXILIARY FUNCTIONS
-**
-** Virtual table implementations may overload SQL functions by implementing
-** the sqlite3_module.xFindFunction() method.
-*/
-
-typedef struct Fts5ExtensionApi Fts5ExtensionApi;
-typedef struct Fts5Context Fts5Context;
-typedef struct Fts5PhraseIter Fts5PhraseIter;
-
-typedef void (*fts5_extension_function)(
- const Fts5ExtensionApi *pApi, /* API offered by current FTS version */
- Fts5Context *pFts, /* First arg to pass to pApi functions */
- sqlite3_context *pCtx, /* Context for returning result/error */
- int nVal, /* Number of values in apVal[] array */
- sqlite3_value **apVal /* Array of trailing arguments */
-);
-
-struct Fts5PhraseIter {
- const unsigned char *a;
- const unsigned char *b;
-};
-
-/*
-** EXTENSION API FUNCTIONS
-**
-** xUserData(pFts):
-** Return a copy of the context pointer the extension function was
-** registered with.
-**
-** xColumnTotalSize(pFts, iCol, pnToken):
-** If parameter iCol is less than zero, set output variable *pnToken
-** to the total number of tokens in the FTS5 table. Or, if iCol is
-** non-negative but less than the number of columns in the table, return
-** the total number of tokens in column iCol, considering all rows in
-** the FTS5 table.
-**
-** If parameter iCol is greater than or equal to the number of columns
-** in the table, SQLITE_RANGE is returned. Or, if an error occurs (e.g.
-** an OOM condition or IO error), an appropriate SQLite error code is
-** returned.
-**
-** xColumnCount(pFts):
-** Return the number of columns in the table.
-**
-** xColumnSize(pFts, iCol, pnToken):
-** If parameter iCol is less than zero, set output variable *pnToken
-** to the total number of tokens in the current row. Or, if iCol is
-** non-negative but less than the number of columns in the table, set
-** *pnToken to the number of tokens in column iCol of the current row.
-**
-** If parameter iCol is greater than or equal to the number of columns
-** in the table, SQLITE_RANGE is returned. Or, if an error occurs (e.g.
-** an OOM condition or IO error), an appropriate SQLite error code is
-** returned.
-**
-** This function may be quite inefficient if used with an FTS5 table
-** created with the "columnsize=0" option.
-**
-** xColumnText:
-** This function attempts to retrieve the text of column iCol of the
-** current document. If successful, (*pz) is set to point to a buffer
-** containing the text in utf-8 encoding, (*pn) is set to the size in bytes
-** (not characters) of the buffer and SQLITE_OK is returned. Otherwise,
-** if an error occurs, an SQLite error code is returned and the final values
-** of (*pz) and (*pn) are undefined.
-**
-** xPhraseCount:
-** Returns the number of phrases in the current query expression.
-**
-** xPhraseSize:
-** Returns the number of tokens in phrase iPhrase of the query. Phrases
-** are numbered starting from zero.
-**
-** xInstCount:
-** Set *pnInst to the total number of occurrences of all phrases within
-** the query within the current row. Return SQLITE_OK if successful, or
-** an error code (i.e. SQLITE_NOMEM) if an error occurs.
-**
-** This API can be quite slow if used with an FTS5 table created with the
-** "detail=none" or "detail=column" option. If the FTS5 table is created
-** with either "detail=none" or "detail=column" and "content=" option
-** (i.e. if it is a contentless table), then this API always returns 0.
-**
-** xInst:
-** Query for the details of phrase match iIdx within the current row.
-** Phrase matches are numbered starting from zero, so the iIdx argument
-** should be greater than or equal to zero and smaller than the value
-** output by xInstCount().
-**
-** Usually, output parameter *piPhrase is set to the phrase number, *piCol
-** to the column in which it occurs and *piOff the token offset of the
-** first token of the phrase. The exception is if the table was created
-** with the offsets=0 option specified. In this case *piOff is always
-** set to -1.
-**
-** Returns SQLITE_OK if successful, or an error code (i.e. SQLITE_NOMEM)
-** if an error occurs.
-**
-** This API can be quite slow if used with an FTS5 table created with the
-** "detail=none" or "detail=column" option.
-**
-** xRowid:
-** Returns the rowid of the current row.
-**
-** xTokenize:
-** Tokenize text using the tokenizer belonging to the FTS5 table.
-**
-** xQueryPhrase(pFts5, iPhrase, pUserData, xCallback):
-** This API function is used to query the FTS table for phrase iPhrase
-** of the current query. Specifically, a query equivalent to:
-**
-** ... FROM ftstable WHERE ftstable MATCH $p ORDER BY rowid
-**
-** with $p set to a phrase equivalent to the phrase iPhrase of the
-** current query is executed. Any column filter that applies to
-** phrase iPhrase of the current query is included in $p. For each
-** row visited, the callback function passed as the fourth argument
-** is invoked. The context and API objects passed to the callback
-** function may be used to access the properties of each matched row.
-** Invoking Api.xUserData() returns a copy of the pointer passed as
-** the third argument to pUserData.
-**
-** If the callback function returns any value other than SQLITE_OK, the
-** query is abandoned and the xQueryPhrase function returns immediately.
-** If the returned value is SQLITE_DONE, xQueryPhrase returns SQLITE_OK.
-** Otherwise, the error code is propagated upwards.
-**
-** If the query runs to completion without incident, SQLITE_OK is returned.
-** Or, if some error occurs before the query completes or is aborted by
-** the callback, an SQLite error code is returned.
-**
-**
-** xSetAuxdata(pFts5, pAux, xDelete)
-**
-** Save the pointer passed as the second argument as the extension functions
-** "auxiliary data". The pointer may then be retrieved by the current or any
-** future invocation of the same fts5 extension function made as part of
-** of the same MATCH query using the xGetAuxdata() API.
-**
-** Each extension function is allocated a single auxiliary data slot for
-** each FTS query (MATCH expression). If the extension function is invoked
-** more than once for a single FTS query, then all invocations share a
-** single auxiliary data context.
-**
-** If there is already an auxiliary data pointer when this function is
-** invoked, then it is replaced by the new pointer. If an xDelete callback
-** was specified along with the original pointer, it is invoked at this
-** point.
-**
-** The xDelete callback, if one is specified, is also invoked on the
-** auxiliary data pointer after the FTS5 query has finished.
-**
-** If an error (e.g. an OOM condition) occurs within this function, an
-** the auxiliary data is set to NULL and an error code returned. If the
-** xDelete parameter was not NULL, it is invoked on the auxiliary data
-** pointer before returning.
-**
-**
-** xGetAuxdata(pFts5, bClear)
-**
-** Returns the current auxiliary data pointer for the fts5 extension
-** function. See the xSetAuxdata() method for details.
-**
-** If the bClear argument is non-zero, then the auxiliary data is cleared
-** (set to NULL) before this function returns. In this case the xDelete,
-** if any, is not invoked.
-**
-**
-** xRowCount(pFts5, pnRow)
-**
-** This function is used to retrieve the total number of rows in the table.
-** In other words, the same value that would be returned by:
-**
-** SELECT count(*) FROM ftstable;
-**
-** xPhraseFirst()
-** This function is used, along with type Fts5PhraseIter and the xPhraseNext
-** method, to iterate through all instances of a single query phrase within
-** the current row. This is the same information as is accessible via the
-** xInstCount/xInst APIs. While the xInstCount/xInst APIs are more convenient
-** to use, this API may be faster under some circumstances. To iterate
-** through instances of phrase iPhrase, use the following code:
-**
-** Fts5PhraseIter iter;
-** int iCol, iOff;
-** for(pApi->xPhraseFirst(pFts, iPhrase, &iter, &iCol, &iOff);
-** iCol>=0;
-** pApi->xPhraseNext(pFts, &iter, &iCol, &iOff)
-** ){
-** // An instance of phrase iPhrase at offset iOff of column iCol
-** }
-**
-** The Fts5PhraseIter structure is defined above. Applications should not
-** modify this structure directly - it should only be used as shown above
-** with the xPhraseFirst() and xPhraseNext() API methods (and by
-** xPhraseFirstColumn() and xPhraseNextColumn() as illustrated below).
-**
-** This API can be quite slow if used with an FTS5 table created with the
-** "detail=none" or "detail=column" option. If the FTS5 table is created
-** with either "detail=none" or "detail=column" and "content=" option
-** (i.e. if it is a contentless table), then this API always iterates
-** through an empty set (all calls to xPhraseFirst() set iCol to -1).
-**
-** xPhraseNext()
-** See xPhraseFirst above.
-**
-** xPhraseFirstColumn()
-** This function and xPhraseNextColumn() are similar to the xPhraseFirst()
-** and xPhraseNext() APIs described above. The difference is that instead
-** of iterating through all instances of a phrase in the current row, these
-** APIs are used to iterate through the set of columns in the current row
-** that contain one or more instances of a specified phrase. For example:
-**
-** Fts5PhraseIter iter;
-** int iCol;
-** for(pApi->xPhraseFirstColumn(pFts, iPhrase, &iter, &iCol);
-** iCol>=0;
-** pApi->xPhraseNextColumn(pFts, &iter, &iCol)
-** ){
-** // Column iCol contains at least one instance of phrase iPhrase
-** }
-**
-** This API can be quite slow if used with an FTS5 table created with the
-** "detail=none" option. If the FTS5 table is created with either
-** "detail=none" "content=" option (i.e. if it is a contentless table),
-** then this API always iterates through an empty set (all calls to
-** xPhraseFirstColumn() set iCol to -1).
-**
-** The information accessed using this API and its companion
-** xPhraseFirstColumn() may also be obtained using xPhraseFirst/xPhraseNext
-** (or xInst/xInstCount). The chief advantage of this API is that it is
-** significantly more efficient than those alternatives when used with
-** "detail=column" tables.
-**
-** xPhraseNextColumn()
-** See xPhraseFirstColumn above.
-*/
-struct Fts5ExtensionApi {
- int iVersion; /* Currently always set to 3 */
-
- void *(*xUserData)(Fts5Context*);
-
- int (*xColumnCount)(Fts5Context*);
- int (*xRowCount)(Fts5Context*, sqlite3_int64 *pnRow);
- int (*xColumnTotalSize)(Fts5Context*, int iCol, sqlite3_int64 *pnToken);
-
- int (*xTokenize)(Fts5Context*,
- const char *pText, int nText, /* Text to tokenize */
- void *pCtx, /* Context passed to xToken() */
- int (*xToken)(void*, int, const char*, int, int, int) /* Callback */
- );
-
- int (*xPhraseCount)(Fts5Context*);
- int (*xPhraseSize)(Fts5Context*, int iPhrase);
-
- int (*xInstCount)(Fts5Context*, int *pnInst);
- int (*xInst)(Fts5Context*, int iIdx, int *piPhrase, int *piCol, int *piOff);
-
- sqlite3_int64 (*xRowid)(Fts5Context*);
- int (*xColumnText)(Fts5Context*, int iCol, const char **pz, int *pn);
- int (*xColumnSize)(Fts5Context*, int iCol, int *pnToken);
-
- int (*xQueryPhrase)(Fts5Context*, int iPhrase, void *pUserData,
- int(*)(const Fts5ExtensionApi*,Fts5Context*,void*)
- );
- int (*xSetAuxdata)(Fts5Context*, void *pAux, void(*xDelete)(void*));
- void *(*xGetAuxdata)(Fts5Context*, int bClear);
-
- int (*xPhraseFirst)(Fts5Context*, int iPhrase, Fts5PhraseIter*, int*, int*);
- void (*xPhraseNext)(Fts5Context*, Fts5PhraseIter*, int *piCol, int *piOff);
-
- int (*xPhraseFirstColumn)(Fts5Context*, int iPhrase, Fts5PhraseIter*, int*);
- void (*xPhraseNextColumn)(Fts5Context*, Fts5PhraseIter*, int *piCol);
-};
-
-/*
-** CUSTOM AUXILIARY FUNCTIONS
-*************************************************************************/
-
-/*************************************************************************
-** CUSTOM TOKENIZERS
-**
-** Applications may also register custom tokenizer types. A tokenizer
-** is registered by providing fts5 with a populated instance of the
-** following structure. All structure methods must be defined, setting
-** any member of the fts5_tokenizer struct to NULL leads to undefined
-** behaviour. The structure methods are expected to function as follows:
-**
-** xCreate:
-** This function is used to allocate and initialize a tokenizer instance.
-** A tokenizer instance is required to actually tokenize text.
-**
-** The first argument passed to this function is a copy of the (void*)
-** pointer provided by the application when the fts5_tokenizer object
-** was registered with FTS5 (the third argument to xCreateTokenizer()).
-** The second and third arguments are an array of nul-terminated strings
-** containing the tokenizer arguments, if any, specified following the
-** tokenizer name as part of the CREATE VIRTUAL TABLE statement used
-** to create the FTS5 table.
-**
-** The final argument is an output variable. If successful, (*ppOut)
-** should be set to point to the new tokenizer handle and SQLITE_OK
-** returned. If an error occurs, some value other than SQLITE_OK should
-** be returned. In this case, fts5 assumes that the final value of *ppOut
-** is undefined.
-**
-** xDelete:
-** This function is invoked to delete a tokenizer handle previously
-** allocated using xCreate(). Fts5 guarantees that this function will
-** be invoked exactly once for each successful call to xCreate().
-**
-** xTokenize:
-** This function is expected to tokenize the nText byte string indicated
-** by argument pText. pText may or may not be nul-terminated. The first
-** argument passed to this function is a pointer to an Fts5Tokenizer object
-** returned by an earlier call to xCreate().
-**
-** The second argument indicates the reason that FTS5 is requesting
-** tokenization of the supplied text. This is always one of the following
-** four values:
-**
-** FTS5_TOKENIZE_DOCUMENT - A document is being inserted into
-** or removed from the FTS table. The tokenizer is being invoked to
-** determine the set of tokens to add to (or delete from) the
-** FTS index.
-**
-** FTS5_TOKENIZE_QUERY - A MATCH query is being executed
-** against the FTS index. The tokenizer is being called to tokenize
-** a bareword or quoted string specified as part of the query.
-**
-** (FTS5_TOKENIZE_QUERY | FTS5_TOKENIZE_PREFIX) - Same as
-** FTS5_TOKENIZE_QUERY, except that the bareword or quoted string is
-** followed by a "*" character, indicating that the last token
-** returned by the tokenizer will be treated as a token prefix.
-**
-** FTS5_TOKENIZE_AUX - The tokenizer is being invoked to
-** satisfy an fts5_api.xTokenize() request made by an auxiliary
-** function. Or an fts5_api.xColumnSize() request made by the same
-** on a columnsize=0 database.
-**
-**
-** For each token in the input string, the supplied callback xToken() must
-** be invoked. The first argument to it should be a copy of the pointer
-** passed as the second argument to xTokenize(). The third and fourth
-** arguments are a pointer to a buffer containing the token text, and the
-** size of the token in bytes. The 4th and 5th arguments are the byte offsets
-** of the first byte of and first byte immediately following the text from
-** which the token is derived within the input.
-**
-** The second argument passed to the xToken() callback ("tflags") should
-** normally be set to 0. The exception is if the tokenizer supports
-** synonyms. In this case see the discussion below for details.
-**
-** FTS5 assumes the xToken() callback is invoked for each token in the
-** order that they occur within the input text.
-**
-** If an xToken() callback returns any value other than SQLITE_OK, then
-** the tokenization should be abandoned and the xTokenize() method should
-** immediately return a copy of the xToken() return value. Or, if the
-** input buffer is exhausted, xTokenize() should return SQLITE_OK. Finally,
-** if an error occurs with the xTokenize() implementation itself, it
-** may abandon the tokenization and return any error code other than
-** SQLITE_OK or SQLITE_DONE.
-**
-** SYNONYM SUPPORT
-**
-** Custom tokenizers may also support synonyms. Consider a case in which a
-** user wishes to query for a phrase such as "first place". Using the
-** built-in tokenizers, the FTS5 query 'first + place' will match instances
-** of "first place" within the document set, but not alternative forms
-** such as "1st place". In some applications, it would be better to match
-** all instances of "first place" or "1st place" regardless of which form
-** the user specified in the MATCH query text.
-**
-** There are several ways to approach this in FTS5:
-**
-** By mapping all synonyms to a single token. In this case, the
-** In the above example, this means that the tokenizer returns the
-** same token for inputs "first" and "1st". Say that token is in
-** fact "first", so that when the user inserts the document "I won
-** 1st place" entries are added to the index for tokens "i", "won",
-** "first" and "place". If the user then queries for '1st + place',
-** the tokenizer substitutes "first" for "1st" and the query works
-** as expected.
-**
-** By adding multiple synonyms for a single term to the FTS index.
-** In this case, when tokenizing query text, the tokenizer may
-** provide multiple synonyms for a single term within the document.
-** FTS5 then queries the index for each synonym individually. For
-** example, faced with the query:
-**
-**
-** ... MATCH 'first place'
-**
-** the tokenizer offers both "1st" and "first" as synonyms for the
-** first token in the MATCH query and FTS5 effectively runs a query
-** similar to:
-**
-**
-** ... MATCH '(first OR 1st) place'
-**
-** except that, for the purposes of auxiliary functions, the query
-** still appears to contain just two phrases - "(first OR 1st)"
-** being treated as a single phrase.
-**
-** By adding multiple synonyms for a single term to the FTS index.
-** Using this method, when tokenizing document text, the tokenizer
-** provides multiple synonyms for each token. So that when a
-** document such as "I won first place" is tokenized, entries are
-** added to the FTS index for "i", "won", "first", "1st" and
-** "place".
-**
-** This way, even if the tokenizer does not provide synonyms
-** when tokenizing query text (it should not - to do would be
-** inefficient), it doesn't matter if the user queries for
-** 'first + place' or '1st + place', as there are entires in the
-** FTS index corresponding to both forms of the first token.
-**
-**
-** Whether it is parsing document or query text, any call to xToken that
-** specifies a tflags argument with the FTS5_TOKEN_COLOCATED bit
-** is considered to supply a synonym for the previous token. For example,
-** when parsing the document "I won first place", a tokenizer that supports
-** synonyms would call xToken() 5 times, as follows:
-**
-**
-** xToken(pCtx, 0, "i", 1, 0, 1);
-** xToken(pCtx, 0, "won", 3, 2, 5);
-** xToken(pCtx, 0, "first", 5, 6, 11);
-** xToken(pCtx, FTS5_TOKEN_COLOCATED, "1st", 3, 6, 11);
-** xToken(pCtx, 0, "place", 5, 12, 17);
-**
-**
-** It is an error to specify the FTS5_TOKEN_COLOCATED flag the first time
-** xToken() is called. Multiple synonyms may be specified for a single token
-** by making multiple calls to xToken(FTS5_TOKEN_COLOCATED) in sequence.
-** There is no limit to the number of synonyms that may be provided for a
-** single token.
-**
-** In many cases, method (1) above is the best approach. It does not add
-** extra data to the FTS index or require FTS5 to query for multiple terms,
-** so it is efficient in terms of disk space and query speed. However, it
-** does not support prefix queries very well. If, as suggested above, the
-** token "first" is subsituted for "1st" by the tokenizer, then the query:
-**
-**
-** ... MATCH '1s*'
-**
-** will not match documents that contain the token "1st" (as the tokenizer
-** will probably not map "1s" to any prefix of "first").
-**
-** For full prefix support, method (3) may be preferred. In this case,
-** because the index contains entries for both "first" and "1st", prefix
-** queries such as 'fi*' or '1s*' will match correctly. However, because
-** extra entries are added to the FTS index, this method uses more space
-** within the database.
-**
-** Method (2) offers a midpoint between (1) and (3). Using this method,
-** a query such as '1s*' will match documents that contain the literal
-** token "1st", but not "first" (assuming the tokenizer is not able to
-** provide synonyms for prefixes). However, a non-prefix query like '1st'
-** will match against "1st" and "first". This method does not require
-** extra disk space, as no extra entries are added to the FTS index.
-** On the other hand, it may require more CPU cycles to run MATCH queries,
-** as separate queries of the FTS index are required for each synonym.
-**
-** When using methods (2) or (3), it is important that the tokenizer only
-** provide synonyms when tokenizing document text (method (2)) or query
-** text (method (3)), not both. Doing so will not cause any errors, but is
-** inefficient.
-*/
-typedef struct Fts5Tokenizer Fts5Tokenizer;
-typedef struct fts5_tokenizer fts5_tokenizer;
-struct fts5_tokenizer {
- int (*xCreate)(void*, const char **azArg, int nArg, Fts5Tokenizer **ppOut);
- void (*xDelete)(Fts5Tokenizer*);
- int (*xTokenize)(Fts5Tokenizer*,
- void *pCtx,
- int flags, /* Mask of FTS5_TOKENIZE_* flags */
- const char *pText, int nText,
- int (*xToken)(
- void *pCtx, /* Copy of 2nd argument to xTokenize() */
- int tflags, /* Mask of FTS5_TOKEN_* flags */
- const char *pToken, /* Pointer to buffer containing token */
- int nToken, /* Size of token in bytes */
- int iStart, /* Byte offset of token within input text */
- int iEnd /* Byte offset of end of token within input text */
- )
- );
-};
-
-/* Flags that may be passed as the third argument to xTokenize() */
-#define FTS5_TOKENIZE_QUERY 0x0001
-#define FTS5_TOKENIZE_PREFIX 0x0002
-#define FTS5_TOKENIZE_DOCUMENT 0x0004
-#define FTS5_TOKENIZE_AUX 0x0008
-
-/* Flags that may be passed by the tokenizer implementation back to FTS5
-** as the third argument to the supplied xToken callback. */
-#define FTS5_TOKEN_COLOCATED 0x0001 /* Same position as prev. token */
-
-/*
-** END OF CUSTOM TOKENIZERS
-*************************************************************************/
-
-/*************************************************************************
-** FTS5 EXTENSION REGISTRATION API
-*/
-typedef struct fts5_api fts5_api;
-struct fts5_api {
- int iVersion; /* Currently always set to 2 */
-
- /* Create a new tokenizer */
- int (*xCreateTokenizer)(
- fts5_api *pApi,
- const char *zName,
- void *pContext,
- fts5_tokenizer *pTokenizer,
- void (*xDestroy)(void*)
- );
-
- /* Find an existing tokenizer */
- int (*xFindTokenizer)(
- fts5_api *pApi,
- const char *zName,
- void **ppContext,
- fts5_tokenizer *pTokenizer
- );
-
- /* Create a new auxiliary function */
- int (*xCreateFunction)(
- fts5_api *pApi,
- const char *zName,
- void *pContext,
- fts5_extension_function xFunction,
- void (*xDestroy)(void*)
- );
-};
-
-/*
-** END OF REGISTRATION API
-*************************************************************************/
-
-#if 0
-} /* end of the 'extern "C"' block */
-#endif
-
-#endif /* _FTS5_H */
-
-/******** End of fts5.h *********/
-
-/************** End of sqlite3.h *********************************************/
-/************** Continuing where we left off in sqliteInt.h ******************/
-
-/*
-** Include the configuration header output by 'configure' if we're using the
-** autoconf-based build
-*/
-#ifdef _HAVE_SQLITE_CONFIG_H
-#include "config.h"
-#endif
-
-/************** Include sqliteLimit.h in the middle of sqliteInt.h ***********/
-/************** Begin file sqliteLimit.h *************************************/
-/*
-** 2007 May 7
-**
-** The author disclaims copyright to this source code. In place of
-** a legal notice, here is a blessing:
-**
-** May you do good and not evil.
-** May you find forgiveness for yourself and forgive others.
-** May you share freely, never taking more than you give.
-**
-*************************************************************************
-**
-** This file defines various limits of what SQLite can process.
-*/
-
-/*
-** The maximum length of a TEXT or BLOB in bytes. This also
-** limits the size of a row in a table or index.
-**
-** The hard limit is the ability of a 32-bit signed integer
-** to count the size: 2^31-1 or 2147483647.
-*/
-#ifndef SQLITE_MAX_LENGTH
-# define SQLITE_MAX_LENGTH 1000000000
-#endif
-
-/*
-** This is the maximum number of
-**
-** * Columns in a table
-** * Columns in an index
-** * Columns in a view
-** * Terms in the SET clause of an UPDATE statement
-** * Terms in the result set of a SELECT statement
-** * Terms in the GROUP BY or ORDER BY clauses of a SELECT statement.
-** * Terms in the VALUES clause of an INSERT statement
-**
-** The hard upper limit here is 32676. Most database people will
-** tell you that in a well-normalized database, you usually should
-** not have more than a dozen or so columns in any table. And if
-** that is the case, there is no point in having more than a few
-** dozen values in any of the other situations described above.
-*/
-#ifndef SQLITE_MAX_COLUMN
-# define SQLITE_MAX_COLUMN 2000
-#endif
-
-/*
-** The maximum length of a single SQL statement in bytes.
-**
-** It used to be the case that setting this value to zero would
-** turn the limit off. That is no longer true. It is not possible
-** to turn this limit off.
-*/
-#ifndef SQLITE_MAX_SQL_LENGTH
-# define SQLITE_MAX_SQL_LENGTH 1000000000
-#endif
-
-/*
-** The maximum depth of an expression tree. This is limited to
-** some extent by SQLITE_MAX_SQL_LENGTH. But sometime you might
-** want to place more severe limits on the complexity of an
-** expression.
-**
-** A value of 0 used to mean that the limit was not enforced.
-** But that is no longer true. The limit is now strictly enforced
-** at all times.
-*/
-#ifndef SQLITE_MAX_EXPR_DEPTH
-# define SQLITE_MAX_EXPR_DEPTH 1000
-#endif
-
-/*
-** The maximum number of terms in a compound SELECT statement.
-** The code generator for compound SELECT statements does one
-** level of recursion for each term. A stack overflow can result
-** if the number of terms is too large. In practice, most SQL
-** never has more than 3 or 4 terms. Use a value of 0 to disable
-** any limit on the number of terms in a compount SELECT.
-*/
-#ifndef SQLITE_MAX_COMPOUND_SELECT
-# define SQLITE_MAX_COMPOUND_SELECT 500
-#endif
-
-/*
-** The maximum number of opcodes in a VDBE program.
-** Not currently enforced.
-*/
-#ifndef SQLITE_MAX_VDBE_OP
-# define SQLITE_MAX_VDBE_OP 25000
-#endif
-
-/*
-** The maximum number of arguments to an SQL function.
-*/
-#ifndef SQLITE_MAX_FUNCTION_ARG
-# define SQLITE_MAX_FUNCTION_ARG 127
-#endif
-
-/*
-** The suggested maximum number of in-memory pages to use for
-** the main database table and for temporary tables.
-**
-** IMPLEMENTATION-OF: R-30185-15359 The default suggested cache size is -2000,
-** which means the cache size is limited to 2048000 bytes of memory.
-** IMPLEMENTATION-OF: R-48205-43578 The default suggested cache size can be
-** altered using the SQLITE_DEFAULT_CACHE_SIZE compile-time options.
-*/
-#ifndef SQLITE_DEFAULT_CACHE_SIZE
-# define SQLITE_DEFAULT_CACHE_SIZE -2000
-#endif
-
-/*
-** The default number of frames to accumulate in the log file before
-** checkpointing the database in WAL mode.
-*/
-#ifndef SQLITE_DEFAULT_WAL_AUTOCHECKPOINT
-# define SQLITE_DEFAULT_WAL_AUTOCHECKPOINT 1000
-#endif
-
-/*
-** The maximum number of attached databases. This must be between 0
-** and 125. The upper bound of 125 is because the attached databases are
-** counted using a signed 8-bit integer which has a maximum value of 127
-** and we have to allow 2 extra counts for the "main" and "temp" databases.
-*/
-#ifndef SQLITE_MAX_ATTACHED
-# define SQLITE_MAX_ATTACHED 10
-#endif
-
-
-/*
-** The maximum value of a ?nnn wildcard that the parser will accept.
-*/
-#ifndef SQLITE_MAX_VARIABLE_NUMBER
-# define SQLITE_MAX_VARIABLE_NUMBER 999
-#endif
-
-/* Maximum page size. The upper bound on this value is 65536. This a limit
-** imposed by the use of 16-bit offsets within each page.
-**
-** Earlier versions of SQLite allowed the user to change this value at
-** compile time. This is no longer permitted, on the grounds that it creates
-** a library that is technically incompatible with an SQLite library
-** compiled with a different limit. If a process operating on a database
-** with a page-size of 65536 bytes crashes, then an instance of SQLite
-** compiled with the default page-size limit will not be able to rollback
-** the aborted transaction. This could lead to database corruption.
-*/
-#ifdef SQLITE_MAX_PAGE_SIZE
-# undef SQLITE_MAX_PAGE_SIZE
-#endif
-#define SQLITE_MAX_PAGE_SIZE 65536
-
-
-/*
-** The default size of a database page.
-*/
-#ifndef SQLITE_DEFAULT_PAGE_SIZE
-# define SQLITE_DEFAULT_PAGE_SIZE 4096
-#endif
-#if SQLITE_DEFAULT_PAGE_SIZE>SQLITE_MAX_PAGE_SIZE
-# undef SQLITE_DEFAULT_PAGE_SIZE
-# define SQLITE_DEFAULT_PAGE_SIZE SQLITE_MAX_PAGE_SIZE
-#endif
-
-/*
-** Ordinarily, if no value is explicitly provided, SQLite creates databases
-** with page size SQLITE_DEFAULT_PAGE_SIZE. However, based on certain
-** device characteristics (sector-size and atomic write() support),
-** SQLite may choose a larger value. This constant is the maximum value
-** SQLite will choose on its own.
-*/
-#ifndef SQLITE_MAX_DEFAULT_PAGE_SIZE
-# define SQLITE_MAX_DEFAULT_PAGE_SIZE 8192
-#endif
-#if SQLITE_MAX_DEFAULT_PAGE_SIZE>SQLITE_MAX_PAGE_SIZE
-# undef SQLITE_MAX_DEFAULT_PAGE_SIZE
-# define SQLITE_MAX_DEFAULT_PAGE_SIZE SQLITE_MAX_PAGE_SIZE
-#endif
-
-
-/*
-** Maximum number of pages in one database file.
-**
-** This is really just the default value for the max_page_count pragma.
-** This value can be lowered (or raised) at run-time using that the
-** max_page_count macro.
-*/
-#ifndef SQLITE_MAX_PAGE_COUNT
-# define SQLITE_MAX_PAGE_COUNT 1073741823
-#endif
-
-/*
-** Maximum length (in bytes) of the pattern in a LIKE or GLOB
-** operator.
-*/
-#ifndef SQLITE_MAX_LIKE_PATTERN_LENGTH
-# define SQLITE_MAX_LIKE_PATTERN_LENGTH 50000
-#endif
-
-/*
-** Maximum depth of recursion for triggers.
-**
-** A value of 1 means that a trigger program will not be able to itself
-** fire any triggers. A value of 0 means that no trigger programs at all
-** may be executed.
-*/
-#ifndef SQLITE_MAX_TRIGGER_DEPTH
-# define SQLITE_MAX_TRIGGER_DEPTH 1000
-#endif
-
-/************** End of sqliteLimit.h *****************************************/
-/************** Continuing where we left off in sqliteInt.h ******************/
-
-/* Disable nuisance warnings on Borland compilers */
-#if defined(__BORLANDC__)
-#pragma warn -rch /* unreachable code */
-#pragma warn -ccc /* Condition is always true or false */
-#pragma warn -aus /* Assigned value is never used */
-#pragma warn -csu /* Comparing signed and unsigned */
-#pragma warn -spa /* Suspicious pointer arithmetic */
-#endif
-
-/*
-** Include standard header files as necessary
-*/
-#ifdef HAVE_STDINT_H
-#include
-#endif
-#ifdef HAVE_INTTYPES_H
-#include
-#endif
-
-/*
-** The following macros are used to cast pointers to integers and
-** integers to pointers. The way you do this varies from one compiler
-** to the next, so we have developed the following set of #if statements
-** to generate appropriate macros for a wide range of compilers.
-**
-** The correct "ANSI" way to do this is to use the intptr_t type.
-** Unfortunately, that typedef is not available on all compilers, or
-** if it is available, it requires an #include of specific headers
-** that vary from one machine to the next.
-**
-** Ticket #3860: The llvm-gcc-4.2 compiler from Apple chokes on
-** the ((void*)&((char*)0)[X]) construct. But MSVC chokes on ((void*)(X)).
-** So we have to define the macros in different ways depending on the
-** compiler.
-*/
-#if defined(__PTRDIFF_TYPE__) /* This case should work for GCC */
-# define SQLITE_INT_TO_PTR(X) ((void*)(__PTRDIFF_TYPE__)(X))
-# define SQLITE_PTR_TO_INT(X) ((int)(__PTRDIFF_TYPE__)(X))
-#elif !defined(__GNUC__) /* Works for compilers other than LLVM */
-# define SQLITE_INT_TO_PTR(X) ((void*)&((char*)0)[X])
-# define SQLITE_PTR_TO_INT(X) ((int)(((char*)X)-(char*)0))
-#elif defined(HAVE_STDINT_H) /* Use this case if we have ANSI headers */
-# define SQLITE_INT_TO_PTR(X) ((void*)(intptr_t)(X))
-# define SQLITE_PTR_TO_INT(X) ((int)(intptr_t)(X))
-#else /* Generates a warning - but it always works */
-# define SQLITE_INT_TO_PTR(X) ((void*)(X))
-# define SQLITE_PTR_TO_INT(X) ((int)(X))
-#endif
-
-/*
-** A macro to hint to the compiler that a function should not be
-** inlined.
-*/
-#if defined(__GNUC__)
-# define SQLITE_NOINLINE __attribute__((noinline))
-#elif defined(_MSC_VER) && _MSC_VER>=1310
-# define SQLITE_NOINLINE __declspec(noinline)
-#else
-# define SQLITE_NOINLINE
-#endif
-
-/*
-** Make sure that the compiler intrinsics we desire are enabled when
-** compiling with an appropriate version of MSVC unless prevented by
-** the SQLITE_DISABLE_INTRINSIC define.
-*/
-#if !defined(SQLITE_DISABLE_INTRINSIC)
-# if defined(_MSC_VER) && _MSC_VER>=1400
-# if !defined(_WIN32_WCE)
-# include
-# pragma intrinsic(_byteswap_ushort)
-# pragma intrinsic(_byteswap_ulong)
-# pragma intrinsic(_ReadWriteBarrier)
-# else
-# include
-# endif
-# endif
-#endif
-
-/*
-** The SQLITE_THREADSAFE macro must be defined as 0, 1, or 2.
-** 0 means mutexes are permanently disable and the library is never
-** threadsafe. 1 means the library is serialized which is the highest
-** level of threadsafety. 2 means the library is multithreaded - multiple
-** threads can use SQLite as long as no two threads try to use the same
-** database connection at the same time.
-**
-** Older versions of SQLite used an optional THREADSAFE macro.
-** We support that for legacy.
-*/
-#if !defined(SQLITE_THREADSAFE)
-# if defined(THREADSAFE)
-# define SQLITE_THREADSAFE THREADSAFE
-# else
-# define SQLITE_THREADSAFE 1 /* IMP: R-07272-22309 */
-# endif
-#endif
-
-/*
-** Powersafe overwrite is on by default. But can be turned off using
-** the -DSQLITE_POWERSAFE_OVERWRITE=0 command-line option.
-*/
-#ifndef SQLITE_POWERSAFE_OVERWRITE
-# define SQLITE_POWERSAFE_OVERWRITE 1
-#endif
-
-/*
-** EVIDENCE-OF: R-25715-37072 Memory allocation statistics are enabled by
-** default unless SQLite is compiled with SQLITE_DEFAULT_MEMSTATUS=0 in
-** which case memory allocation statistics are disabled by default.
-*/
-#if !defined(SQLITE_DEFAULT_MEMSTATUS)
-# define SQLITE_DEFAULT_MEMSTATUS 1
-#endif
-
-/*
-** Exactly one of the following macros must be defined in order to
-** specify which memory allocation subsystem to use.
-**
-** SQLITE_SYSTEM_MALLOC // Use normal system malloc()
-** SQLITE_WIN32_MALLOC // Use Win32 native heap API
-** SQLITE_ZERO_MALLOC // Use a stub allocator that always fails
-** SQLITE_MEMDEBUG // Debugging version of system malloc()
-**
-** On Windows, if the SQLITE_WIN32_MALLOC_VALIDATE macro is defined and the
-** assert() macro is enabled, each call into the Win32 native heap subsystem
-** will cause HeapValidate to be called. If heap validation should fail, an
-** assertion will be triggered.
-**
-** If none of the above are defined, then set SQLITE_SYSTEM_MALLOC as
-** the default.
-*/
-#if defined(SQLITE_SYSTEM_MALLOC) \
- + defined(SQLITE_WIN32_MALLOC) \
- + defined(SQLITE_ZERO_MALLOC) \
- + defined(SQLITE_MEMDEBUG)>1
-# error "Two or more of the following compile-time configuration options\
- are defined but at most one is allowed:\
- SQLITE_SYSTEM_MALLOC, SQLITE_WIN32_MALLOC, SQLITE_MEMDEBUG,\
- SQLITE_ZERO_MALLOC"
-#endif
-#if defined(SQLITE_SYSTEM_MALLOC) \
- + defined(SQLITE_WIN32_MALLOC) \
- + defined(SQLITE_ZERO_MALLOC) \
- + defined(SQLITE_MEMDEBUG)==0
-# define SQLITE_SYSTEM_MALLOC 1
-#endif
-
-/*
-** If SQLITE_MALLOC_SOFT_LIMIT is not zero, then try to keep the
-** sizes of memory allocations below this value where possible.
-*/
-#if !defined(SQLITE_MALLOC_SOFT_LIMIT)
-# define SQLITE_MALLOC_SOFT_LIMIT 1024
-#endif
-
-/*
-** We need to define _XOPEN_SOURCE as follows in order to enable
-** recursive mutexes on most Unix systems and fchmod() on OpenBSD.
-** But _XOPEN_SOURCE define causes problems for Mac OS X, so omit
-** it.
-*/
-#if !defined(_XOPEN_SOURCE) && !defined(__DARWIN__) && !defined(__APPLE__)
-# define _XOPEN_SOURCE 600
-#endif
-
-/*
-** NDEBUG and SQLITE_DEBUG are opposites. It should always be true that
-** defined(NDEBUG)==!defined(SQLITE_DEBUG). If this is not currently true,
-** make it true by defining or undefining NDEBUG.
-**
-** Setting NDEBUG makes the code smaller and faster by disabling the
-** assert() statements in the code. So we want the default action
-** to be for NDEBUG to be set and NDEBUG to be undefined only if SQLITE_DEBUG
-** is set. Thus NDEBUG becomes an opt-in rather than an opt-out
-** feature.
-*/
-#if !defined(NDEBUG) && !defined(SQLITE_DEBUG)
-# define NDEBUG 1
-#endif
-#if defined(NDEBUG) && defined(SQLITE_DEBUG)
-# undef NDEBUG
-#endif
-
-/*
-** Enable SQLITE_ENABLE_EXPLAIN_COMMENTS if SQLITE_DEBUG is turned on.
-*/
-#if !defined(SQLITE_ENABLE_EXPLAIN_COMMENTS) && defined(SQLITE_DEBUG)
-# define SQLITE_ENABLE_EXPLAIN_COMMENTS 1
-#endif
-
-/*
-** The testcase() macro is used to aid in coverage testing. When
-** doing coverage testing, the condition inside the argument to
-** testcase() must be evaluated both true and false in order to
-** get full branch coverage. The testcase() macro is inserted
-** to help ensure adequate test coverage in places where simple
-** condition/decision coverage is inadequate. For example, testcase()
-** can be used to make sure boundary values are tested. For
-** bitmask tests, testcase() can be used to make sure each bit
-** is significant and used at least once. On switch statements
-** where multiple cases go to the same block of code, testcase()
-** can insure that all cases are evaluated.
-**
-*/
-#ifdef SQLITE_COVERAGE_TEST
-SQLITE_PRIVATE void sqlite3Coverage(int);
-# define testcase(X) if( X ){ sqlite3Coverage(__LINE__); }
-#else
-# define testcase(X)
-#endif
-
-/*
-** The TESTONLY macro is used to enclose variable declarations or
-** other bits of code that are needed to support the arguments
-** within testcase() and assert() macros.
-*/
-#if !defined(NDEBUG) || defined(SQLITE_COVERAGE_TEST)
-# define TESTONLY(X) X
-#else
-# define TESTONLY(X)
-#endif
-
-/*
-** Sometimes we need a small amount of code such as a variable initialization
-** to setup for a later assert() statement. We do not want this code to
-** appear when assert() is disabled. The following macro is therefore
-** used to contain that setup code. The "VVA" acronym stands for
-** "Verification, Validation, and Accreditation". In other words, the
-** code within VVA_ONLY() will only run during verification processes.
-*/
-#ifndef NDEBUG
-# define VVA_ONLY(X) X
-#else
-# define VVA_ONLY(X)
-#endif
-
-/*
-** The ALWAYS and NEVER macros surround boolean expressions which
-** are intended to always be true or false, respectively. Such
-** expressions could be omitted from the code completely. But they
-** are included in a few cases in order to enhance the resilience
-** of SQLite to unexpected behavior - to make the code "self-healing"
-** or "ductile" rather than being "brittle" and crashing at the first
-** hint of unplanned behavior.
-**
-** In other words, ALWAYS and NEVER are added for defensive code.
-**
-** When doing coverage testing ALWAYS and NEVER are hard-coded to
-** be true and false so that the unreachable code they specify will
-** not be counted as untested code.
-*/
-#if defined(SQLITE_COVERAGE_TEST) || defined(SQLITE_MUTATION_TEST)
-# define ALWAYS(X) (1)
-# define NEVER(X) (0)
-#elif !defined(NDEBUG)
-# define ALWAYS(X) ((X)?1:(assert(0),0))
-# define NEVER(X) ((X)?(assert(0),1):0)
-#else
-# define ALWAYS(X) (X)
-# define NEVER(X) (X)
-#endif
-
-/*
-** Some malloc failures are only possible if SQLITE_TEST_REALLOC_STRESS is
-** defined. We need to defend against those failures when testing with
-** SQLITE_TEST_REALLOC_STRESS, but we don't want the unreachable branches
-** during a normal build. The following macro can be used to disable tests
-** that are always false except when SQLITE_TEST_REALLOC_STRESS is set.
-*/
-#if defined(SQLITE_TEST_REALLOC_STRESS)
-# define ONLY_IF_REALLOC_STRESS(X) (X)
-#elif !defined(NDEBUG)
-# define ONLY_IF_REALLOC_STRESS(X) ((X)?(assert(0),1):0)
-#else
-# define ONLY_IF_REALLOC_STRESS(X) (0)
-#endif
-
-/*
-** Declarations used for tracing the operating system interfaces.
-*/
-#if defined(SQLITE_FORCE_OS_TRACE) || defined(SQLITE_TEST) || \
- (defined(SQLITE_DEBUG) && SQLITE_OS_WIN)
- extern int sqlite3OSTrace;
-# define OSTRACE(X) if( sqlite3OSTrace ) sqlite3DebugPrintf X
-# define SQLITE_HAVE_OS_TRACE
-#else
-# define OSTRACE(X)
-# undef SQLITE_HAVE_OS_TRACE
-#endif
-
-/*
-** Is the sqlite3ErrName() function needed in the build? Currently,
-** it is needed by "mutex_w32.c" (when debugging), "os_win.c" (when
-** OSTRACE is enabled), and by several "test*.c" files (which are
-** compiled using SQLITE_TEST).
-*/
-#if defined(SQLITE_HAVE_OS_TRACE) || defined(SQLITE_TEST) || \
- (defined(SQLITE_DEBUG) && SQLITE_OS_WIN)
-# define SQLITE_NEED_ERR_NAME
-#else
-# undef SQLITE_NEED_ERR_NAME
-#endif
-
-/*
-** SQLITE_ENABLE_EXPLAIN_COMMENTS is incompatible with SQLITE_OMIT_EXPLAIN
-*/
-#ifdef SQLITE_OMIT_EXPLAIN
-# undef SQLITE_ENABLE_EXPLAIN_COMMENTS
-#endif
-
-/*
-** Return true (non-zero) if the input is an integer that is too large
-** to fit in 32-bits. This macro is used inside of various testcase()
-** macros to verify that we have tested SQLite for large-file support.
-*/
-#define IS_BIG_INT(X) (((X)&~(i64)0xffffffff)!=0)
-
-/*
-** The macro unlikely() is a hint that surrounds a boolean
-** expression that is usually false. Macro likely() surrounds
-** a boolean expression that is usually true. These hints could,
-** in theory, be used by the compiler to generate better code, but
-** currently they are just comments for human readers.
-*/
-#define likely(X) (X)
-#define unlikely(X) (X)
-
-/************** Include hash.h in the middle of sqliteInt.h ******************/
-/************** Begin file hash.h ********************************************/
-/*
-** 2001 September 22
-**
-** The author disclaims copyright to this source code. In place of
-** a legal notice, here is a blessing:
-**
-** May you do good and not evil.
-** May you find forgiveness for yourself and forgive others.
-** May you share freely, never taking more than you give.
-**
-*************************************************************************
-** This is the header file for the generic hash-table implementation
-** used in SQLite.
-*/
-#ifndef SQLITE_HASH_H
-#define SQLITE_HASH_H
-
-/* Forward declarations of structures. */
-typedef struct Hash Hash;
-typedef struct HashElem HashElem;
-
-/* A complete hash table is an instance of the following structure.
-** The internals of this structure are intended to be opaque -- client
-** code should not attempt to access or modify the fields of this structure
-** directly. Change this structure only by using the routines below.
-** However, some of the "procedures" and "functions" for modifying and
-** accessing this structure are really macros, so we can't really make
-** this structure opaque.
-**
-** All elements of the hash table are on a single doubly-linked list.
-** Hash.first points to the head of this list.
-**
-** There are Hash.htsize buckets. Each bucket points to a spot in
-** the global doubly-linked list. The contents of the bucket are the
-** element pointed to plus the next _ht.count-1 elements in the list.
-**
-** Hash.htsize and Hash.ht may be zero. In that case lookup is done
-** by a linear search of the global list. For small tables, the
-** Hash.ht table is never allocated because if there are few elements
-** in the table, it is faster to do a linear search than to manage
-** the hash table.
-*/
-struct Hash {
- unsigned int htsize; /* Number of buckets in the hash table */
- unsigned int count; /* Number of entries in this table */
- HashElem *first; /* The first element of the array */
- struct _ht { /* the hash table */
- int count; /* Number of entries with this hash */
- HashElem *chain; /* Pointer to first entry with this hash */
- } *ht;
-};
-
-/* Each element in the hash table is an instance of the following
-** structure. All elements are stored on a single doubly-linked list.
-**
-** Again, this structure is intended to be opaque, but it can't really
-** be opaque because it is used by macros.
-*/
-struct HashElem {
- HashElem *next, *prev; /* Next and previous elements in the table */
- void *data; /* Data associated with this element */
- const char *pKey; /* Key associated with this element */
-};
-
-/*
-** Access routines. To delete, insert a NULL pointer.
-*/
-SQLITE_PRIVATE void sqlite3HashInit(Hash*);
-SQLITE_PRIVATE void *sqlite3HashInsert(Hash*, const char *pKey, void *pData);
-SQLITE_PRIVATE void *sqlite3HashFind(const Hash*, const char *pKey);
-SQLITE_PRIVATE void sqlite3HashClear(Hash*);
-
-/*
-** Macros for looping over all elements of a hash table. The idiom is
-** like this:
-**
-** Hash h;
-** HashElem *p;
-** ...
-** for(p=sqliteHashFirst(&h); p; p=sqliteHashNext(p)){
-** SomeStructure *pData = sqliteHashData(p);
-** // do something with pData
-** }
-*/
-#define sqliteHashFirst(H) ((H)->first)
-#define sqliteHashNext(E) ((E)->next)
-#define sqliteHashData(E) ((E)->data)
-/* #define sqliteHashKey(E) ((E)->pKey) // NOT USED */
-/* #define sqliteHashKeysize(E) ((E)->nKey) // NOT USED */
-
-/*
-** Number of entries in a hash table
-*/
-/* #define sqliteHashCount(H) ((H)->count) // NOT USED */
-
-#endif /* SQLITE_HASH_H */
-
-/************** End of hash.h ************************************************/
-/************** Continuing where we left off in sqliteInt.h ******************/
-/************** Include parse.h in the middle of sqliteInt.h *****************/
-/************** Begin file parse.h *******************************************/
-#define TK_SEMI 1
-#define TK_EXPLAIN 2
-#define TK_QUERY 3
-#define TK_PLAN 4
-#define TK_BEGIN 5
-#define TK_TRANSACTION 6
-#define TK_DEFERRED 7
-#define TK_IMMEDIATE 8
-#define TK_EXCLUSIVE 9
-#define TK_COMMIT 10
-#define TK_END 11
-#define TK_ROLLBACK 12
-#define TK_SAVEPOINT 13
-#define TK_RELEASE 14
-#define TK_TO 15
-#define TK_TABLE 16
-#define TK_CREATE 17
-#define TK_IF 18
-#define TK_NOT 19
-#define TK_EXISTS 20
-#define TK_TEMP 21
-#define TK_LP 22
-#define TK_RP 23
-#define TK_AS 24
-#define TK_WITHOUT 25
-#define TK_COMMA 26
-#define TK_OR 27
-#define TK_AND 28
-#define TK_IS 29
-#define TK_MATCH 30
-#define TK_LIKE_KW 31
-#define TK_BETWEEN 32
-#define TK_IN 33
-#define TK_ISNULL 34
-#define TK_NOTNULL 35
-#define TK_NE 36
-#define TK_EQ 37
-#define TK_GT 38
-#define TK_LE 39
-#define TK_LT 40
-#define TK_GE 41
-#define TK_ESCAPE 42
-#define TK_BITAND 43
-#define TK_BITOR 44
-#define TK_LSHIFT 45
-#define TK_RSHIFT 46
-#define TK_PLUS 47
-#define TK_MINUS 48
-#define TK_STAR 49
-#define TK_SLASH 50
-#define TK_REM 51
-#define TK_CONCAT 52
-#define TK_COLLATE 53
-#define TK_BITNOT 54
-#define TK_ID 55
-#define TK_INDEXED 56
-#define TK_ABORT 57
-#define TK_ACTION 58
-#define TK_AFTER 59
-#define TK_ANALYZE 60
-#define TK_ASC 61
-#define TK_ATTACH 62
-#define TK_BEFORE 63
-#define TK_BY 64
-#define TK_CASCADE 65
-#define TK_CAST 66
-#define TK_COLUMNKW 67
-#define TK_CONFLICT 68
-#define TK_DATABASE 69
-#define TK_DESC 70
-#define TK_DETACH 71
-#define TK_EACH 72
-#define TK_FAIL 73
-#define TK_FOR 74
-#define TK_IGNORE 75
-#define TK_INITIALLY 76
-#define TK_INSTEAD 77
-#define TK_NO 78
-#define TK_KEY 79
-#define TK_OF 80
-#define TK_OFFSET 81
-#define TK_PRAGMA 82
-#define TK_RAISE 83
-#define TK_RECURSIVE 84
-#define TK_REPLACE 85
-#define TK_RESTRICT 86
-#define TK_ROW 87
-#define TK_TRIGGER 88
-#define TK_VACUUM 89
-#define TK_VIEW 90
-#define TK_VIRTUAL 91
-#define TK_WITH 92
-#define TK_REINDEX 93
-#define TK_RENAME 94
-#define TK_CTIME_KW 95
-#define TK_ANY 96
-#define TK_STRING 97
-#define TK_JOIN_KW 98
-#define TK_CONSTRAINT 99
-#define TK_DEFAULT 100
-#define TK_NULL 101
-#define TK_PRIMARY 102
-#define TK_UNIQUE 103
-#define TK_CHECK 104
-#define TK_REFERENCES 105
-#define TK_AUTOINCR 106
-#define TK_ON 107
-#define TK_INSERT 108
-#define TK_DELETE 109
-#define TK_UPDATE 110
-#define TK_SET 111
-#define TK_DEFERRABLE 112
-#define TK_FOREIGN 113
-#define TK_DROP 114
-#define TK_UNION 115
-#define TK_ALL 116
-#define TK_EXCEPT 117
-#define TK_INTERSECT 118
-#define TK_SELECT 119
-#define TK_VALUES 120
-#define TK_DISTINCT 121
-#define TK_DOT 122
-#define TK_FROM 123
-#define TK_JOIN 124
-#define TK_USING 125
-#define TK_ORDER 126
-#define TK_GROUP 127
-#define TK_HAVING 128
-#define TK_LIMIT 129
-#define TK_WHERE 130
-#define TK_INTO 131
-#define TK_INTEGER 132
-#define TK_FLOAT 133
-#define TK_BLOB 134
-#define TK_VARIABLE 135
-#define TK_CASE 136
-#define TK_WHEN 137
-#define TK_THEN 138
-#define TK_ELSE 139
-#define TK_INDEX 140
-#define TK_ALTER 141
-#define TK_ADD 142
-#define TK_TO_TEXT 143
-#define TK_TO_BLOB 144
-#define TK_TO_NUMERIC 145
-#define TK_TO_INT 146
-#define TK_TO_REAL 147
-#define TK_ISNOT 148
-#define TK_END_OF_FILE 149
-#define TK_UNCLOSED_STRING 150
-#define TK_FUNCTION 151
-#define TK_COLUMN 152
-#define TK_AGG_FUNCTION 153
-#define TK_AGG_COLUMN 154
-#define TK_UMINUS 155
-#define TK_UPLUS 156
-#define TK_REGISTER 157
-#define TK_ASTERISK 158
-#define TK_SPAN 159
-#define TK_SPACE 160
-#define TK_ILLEGAL 161
-
-/* The token codes above must all fit in 8 bits */
-#define TKFLG_MASK 0xff
-
-/* Flags that can be added to a token code when it is not
-** being stored in a u8: */
-#define TKFLG_DONTFOLD 0x100 /* Omit constant folding optimizations */
-
-/************** End of parse.h ***********************************************/
-/************** Continuing where we left off in sqliteInt.h ******************/
-#include
-#include
-#include
-#include
-#include
-
-/*
-** If compiling for a processor that lacks floating point support,
-** substitute integer for floating-point
-*/
-#ifdef SQLITE_OMIT_FLOATING_POINT
-# define double sqlite_int64
-# define float sqlite_int64
-# define LONGDOUBLE_TYPE sqlite_int64
-# ifndef SQLITE_BIG_DBL
-# define SQLITE_BIG_DBL (((sqlite3_int64)1)<<50)
-# endif
-# define SQLITE_OMIT_DATETIME_FUNCS 1
-# define SQLITE_OMIT_TRACE 1
-# undef SQLITE_MIXED_ENDIAN_64BIT_FLOAT
-# undef SQLITE_HAVE_ISNAN
-#endif
-#ifndef SQLITE_BIG_DBL
-# define SQLITE_BIG_DBL (1e99)
-#endif
-
-/*
-** OMIT_TEMPDB is set to 1 if SQLITE_OMIT_TEMPDB is defined, or 0
-** afterward. Having this macro allows us to cause the C compiler
-** to omit code used by TEMP tables without messy #ifndef statements.
-*/
-#ifdef SQLITE_OMIT_TEMPDB
-#define OMIT_TEMPDB 1
-#else
-#define OMIT_TEMPDB 0
-#endif
-
-/*
-** The "file format" number is an integer that is incremented whenever
-** the VDBE-level file format changes. The following macros define the
-** the default file format for new databases and the maximum file format
-** that the library can read.
-*/
-#define SQLITE_MAX_FILE_FORMAT 4
-#ifndef SQLITE_DEFAULT_FILE_FORMAT
-# define SQLITE_DEFAULT_FILE_FORMAT 4
-#endif
-
-/*
-** Determine whether triggers are recursive by default. This can be
-** changed at run-time using a pragma.
-*/
-#ifndef SQLITE_DEFAULT_RECURSIVE_TRIGGERS
-# define SQLITE_DEFAULT_RECURSIVE_TRIGGERS 0
-#endif
-
-/*
-** Provide a default value for SQLITE_TEMP_STORE in case it is not specified
-** on the command-line
-*/
-#ifndef SQLITE_TEMP_STORE
-# define SQLITE_TEMP_STORE 1
-# define SQLITE_TEMP_STORE_xc 1 /* Exclude from ctime.c */
-#endif
-
-/*
-** If no value has been provided for SQLITE_MAX_WORKER_THREADS, or if
-** SQLITE_TEMP_STORE is set to 3 (never use temporary files), set it
-** to zero.
-*/
-#if SQLITE_TEMP_STORE==3 || SQLITE_THREADSAFE==0
-# undef SQLITE_MAX_WORKER_THREADS
-# define SQLITE_MAX_WORKER_THREADS 0
-#endif
-#ifndef SQLITE_MAX_WORKER_THREADS
-# define SQLITE_MAX_WORKER_THREADS 8
-#endif
-#ifndef SQLITE_DEFAULT_WORKER_THREADS
-# define SQLITE_DEFAULT_WORKER_THREADS 0
-#endif
-#if SQLITE_DEFAULT_WORKER_THREADS>SQLITE_MAX_WORKER_THREADS
-# undef SQLITE_MAX_WORKER_THREADS
-# define SQLITE_MAX_WORKER_THREADS SQLITE_DEFAULT_WORKER_THREADS
-#endif
-
-/*
-** The default initial allocation for the pagecache when using separate
-** pagecaches for each database connection. A positive number is the
-** number of pages. A negative number N translations means that a buffer
-** of -1024*N bytes is allocated and used for as many pages as it will hold.
-*/
-#ifndef SQLITE_DEFAULT_PCACHE_INITSZ
-# define SQLITE_DEFAULT_PCACHE_INITSZ 100
-#endif
-
-/*
-** GCC does not define the offsetof() macro so we'll have to do it
-** ourselves.
-*/
-#ifndef offsetof
-#define offsetof(STRUCTURE,FIELD) ((int)((char*)&((STRUCTURE*)0)->FIELD))
-#endif
-
-/*
-** Macros to compute minimum and maximum of two numbers.
-*/
-#ifndef MIN
-# define MIN(A,B) ((A)<(B)?(A):(B))
-#endif
-#ifndef MAX
-# define MAX(A,B) ((A)>(B)?(A):(B))
-#endif
-
-/*
-** Swap two objects of type TYPE.
-*/
-#define SWAP(TYPE,A,B) {TYPE t=A; A=B; B=t;}
-
-/*
-** Check to see if this machine uses EBCDIC. (Yes, believe it or
-** not, there are still machines out there that use EBCDIC.)
-*/
-#if 'A' == '\301'
-# define SQLITE_EBCDIC 1
-#else
-# define SQLITE_ASCII 1
-#endif
-
-/*
-** Integers of known sizes. These typedefs might change for architectures
-** where the sizes very. Preprocessor macros are available so that the
-** types can be conveniently redefined at compile-type. Like this:
-**
-** cc '-DUINTPTR_TYPE=long long int' ...
-*/
-#ifndef UINT32_TYPE
-# ifdef HAVE_UINT32_T
-# define UINT32_TYPE uint32_t
-# else
-# define UINT32_TYPE unsigned int
-# endif
-#endif
-#ifndef UINT16_TYPE
-# ifdef HAVE_UINT16_T
-# define UINT16_TYPE uint16_t
-# else
-# define UINT16_TYPE unsigned short int
-# endif
-#endif
-#ifndef INT16_TYPE
-# ifdef HAVE_INT16_T
-# define INT16_TYPE int16_t
-# else
-# define INT16_TYPE short int
-# endif
-#endif
-#ifndef UINT8_TYPE
-# ifdef HAVE_UINT8_T
-# define UINT8_TYPE uint8_t
-# else
-# define UINT8_TYPE unsigned char
-# endif
-#endif
-#ifndef INT8_TYPE
-# ifdef HAVE_INT8_T
-# define INT8_TYPE int8_t
-# else
-# define INT8_TYPE signed char
-# endif
-#endif
-#ifndef LONGDOUBLE_TYPE
-# define LONGDOUBLE_TYPE long double
-#endif
-typedef sqlite_int64 i64; /* 8-byte signed integer */
-typedef sqlite_uint64 u64; /* 8-byte unsigned integer */
-typedef UINT32_TYPE u32; /* 4-byte unsigned integer */
-typedef UINT16_TYPE u16; /* 2-byte unsigned integer */
-typedef INT16_TYPE i16; /* 2-byte signed integer */
-typedef UINT8_TYPE u8; /* 1-byte unsigned integer */
-typedef INT8_TYPE i8; /* 1-byte signed integer */
-
-/*
-** SQLITE_MAX_U32 is a u64 constant that is the maximum u64 value
-** that can be stored in a u32 without loss of data. The value
-** is 0x00000000ffffffff. But because of quirks of some compilers, we
-** have to specify the value in the less intuitive manner shown:
-*/
-#define SQLITE_MAX_U32 ((((u64)1)<<32)-1)
-
-/*
-** The datatype used to store estimates of the number of rows in a
-** table or index. This is an unsigned integer type. For 99.9% of
-** the world, a 32-bit integer is sufficient. But a 64-bit integer
-** can be used at compile-time if desired.
-*/
-#ifdef SQLITE_64BIT_STATS
- typedef u64 tRowcnt; /* 64-bit only if requested at compile-time */
-#else
- typedef u32 tRowcnt; /* 32-bit is the default */
-#endif
-
-/*
-** Estimated quantities used for query planning are stored as 16-bit
-** logarithms. For quantity X, the value stored is 10*log2(X). This
-** gives a possible range of values of approximately 1.0e986 to 1e-986.
-** But the allowed values are "grainy". Not every value is representable.
-** For example, quantities 16 and 17 are both represented by a LogEst
-** of 40. However, since LogEst quantities are suppose to be estimates,
-** not exact values, this imprecision is not a problem.
-**
-** "LogEst" is short for "Logarithmic Estimate".
-**
-** Examples:
-** 1 -> 0 20 -> 43 10000 -> 132
-** 2 -> 10 25 -> 46 25000 -> 146
-** 3 -> 16 100 -> 66 1000000 -> 199
-** 4 -> 20 1000 -> 99 1048576 -> 200
-** 10 -> 33 1024 -> 100 4294967296 -> 320
-**
-** The LogEst can be negative to indicate fractional values.
-** Examples:
-**
-** 0.5 -> -10 0.1 -> -33 0.0625 -> -40
-*/
-typedef INT16_TYPE LogEst;
-
-/*
-** Set the SQLITE_PTRSIZE macro to the number of bytes in a pointer
-*/
-#ifndef SQLITE_PTRSIZE
-# if defined(__SIZEOF_POINTER__)
-# define SQLITE_PTRSIZE __SIZEOF_POINTER__
-# elif defined(i386) || defined(__i386__) || defined(_M_IX86) || \
- defined(_M_ARM) || defined(__arm__) || defined(__x86)
-# define SQLITE_PTRSIZE 4
-# else
-# define SQLITE_PTRSIZE 8
-# endif
-#endif
-
-/* The uptr type is an unsigned integer large enough to hold a pointer
-*/
-#if defined(HAVE_STDINT_H)
- typedef uintptr_t uptr;
-#elif SQLITE_PTRSIZE==4
- typedef u32 uptr;
-#else
- typedef u64 uptr;
-#endif
-
-/*
-** The SQLITE_WITHIN(P,S,E) macro checks to see if pointer P points to
-** something between S (inclusive) and E (exclusive).
-**
-** In other words, S is a buffer and E is a pointer to the first byte after
-** the end of buffer S. This macro returns true if P points to something
-** contained within the buffer S.
-*/
-#define SQLITE_WITHIN(P,S,E) (((uptr)(P)>=(uptr)(S))&&((uptr)(P)<(uptr)(E)))
-
-
-/*
-** Macros to determine whether the machine is big or little endian,
-** and whether or not that determination is run-time or compile-time.
-**
-** For best performance, an attempt is made to guess at the byte-order
-** using C-preprocessor macros. If that is unsuccessful, or if
-** -DSQLITE_RUNTIME_BYTEORDER=1 is set, then byte-order is determined
-** at run-time.
-*/
-#if (defined(i386) || defined(__i386__) || defined(_M_IX86) || \
- defined(__x86_64) || defined(__x86_64__) || defined(_M_X64) || \
- defined(_M_AMD64) || defined(_M_ARM) || defined(__x86) || \
- defined(__arm__)) && !defined(SQLITE_RUNTIME_BYTEORDER)
-# define SQLITE_BYTEORDER 1234
-# define SQLITE_BIGENDIAN 0
-# define SQLITE_LITTLEENDIAN 1
-# define SQLITE_UTF16NATIVE SQLITE_UTF16LE
-#endif
-#if (defined(sparc) || defined(__ppc__)) \
- && !defined(SQLITE_RUNTIME_BYTEORDER)
-# define SQLITE_BYTEORDER 4321
-# define SQLITE_BIGENDIAN 1
-# define SQLITE_LITTLEENDIAN 0
-# define SQLITE_UTF16NATIVE SQLITE_UTF16BE
-#endif
-#if !defined(SQLITE_BYTEORDER)
-# ifdef SQLITE_AMALGAMATION
- const int sqlite3one = 1;
-# else
- extern const int sqlite3one;
-# endif
-# define SQLITE_BYTEORDER 0 /* 0 means "unknown at compile-time" */
-# define SQLITE_BIGENDIAN (*(char *)(&sqlite3one)==0)
-# define SQLITE_LITTLEENDIAN (*(char *)(&sqlite3one)==1)
-# define SQLITE_UTF16NATIVE (SQLITE_BIGENDIAN?SQLITE_UTF16BE:SQLITE_UTF16LE)
-#endif
-
-/*
-** Constants for the largest and smallest possible 64-bit signed integers.
-** These macros are designed to work correctly on both 32-bit and 64-bit
-** compilers.
-*/
-#define LARGEST_INT64 (0xffffffff|(((i64)0x7fffffff)<<32))
-#define SMALLEST_INT64 (((i64)-1) - LARGEST_INT64)
-
-/*
-** Round up a number to the next larger multiple of 8. This is used
-** to force 8-byte alignment on 64-bit architectures.
-*/
-#define ROUND8(x) (((x)+7)&~7)
-
-/*
-** Round down to the nearest multiple of 8
-*/
-#define ROUNDDOWN8(x) ((x)&~7)
-
-/*
-** Assert that the pointer X is aligned to an 8-byte boundary. This
-** macro is used only within assert() to verify that the code gets
-** all alignment restrictions correct.
-**
-** Except, if SQLITE_4_BYTE_ALIGNED_MALLOC is defined, then the
-** underlying malloc() implementation might return us 4-byte aligned
-** pointers. In that case, only verify 4-byte alignment.
-*/
-#ifdef SQLITE_4_BYTE_ALIGNED_MALLOC
-# define EIGHT_BYTE_ALIGNMENT(X) ((((char*)(X) - (char*)0)&3)==0)
-#else
-# define EIGHT_BYTE_ALIGNMENT(X) ((((char*)(X) - (char*)0)&7)==0)
-#endif
-
-/*
-** Disable MMAP on platforms where it is known to not work
-*/
-#if defined(__OpenBSD__) || defined(__QNXNTO__)
-# undef SQLITE_MAX_MMAP_SIZE
-# define SQLITE_MAX_MMAP_SIZE 0
-#endif
-
-/*
-** Default maximum size of memory used by memory-mapped I/O in the VFS
-*/
-#ifdef __APPLE__
-# include
-#endif
-#ifndef SQLITE_MAX_MMAP_SIZE
-# if defined(__linux__) \
- || defined(_WIN32) \
- || (defined(__APPLE__) && defined(__MACH__)) \
- || defined(__sun) \
- || defined(__FreeBSD__) \
- || defined(__DragonFly__)
-# define SQLITE_MAX_MMAP_SIZE 0x7fff0000 /* 2147418112 */
-# else
-# define SQLITE_MAX_MMAP_SIZE 0
-# endif
-# define SQLITE_MAX_MMAP_SIZE_xc 1 /* exclude from ctime.c */
-#endif
-
-/*
-** The default MMAP_SIZE is zero on all platforms. Or, even if a larger
-** default MMAP_SIZE is specified at compile-time, make sure that it does
-** not exceed the maximum mmap size.
-*/
-#ifndef SQLITE_DEFAULT_MMAP_SIZE
-# define SQLITE_DEFAULT_MMAP_SIZE 0
-# define SQLITE_DEFAULT_MMAP_SIZE_xc 1 /* Exclude from ctime.c */
-#endif
-#if SQLITE_DEFAULT_MMAP_SIZE>SQLITE_MAX_MMAP_SIZE
-# undef SQLITE_DEFAULT_MMAP_SIZE
-# define SQLITE_DEFAULT_MMAP_SIZE SQLITE_MAX_MMAP_SIZE
-#endif
-
-/*
-** Only one of SQLITE_ENABLE_STAT3 or SQLITE_ENABLE_STAT4 can be defined.
-** Priority is given to SQLITE_ENABLE_STAT4. If either are defined, also
-** define SQLITE_ENABLE_STAT3_OR_STAT4
-*/
-#ifdef SQLITE_ENABLE_STAT4
-# undef SQLITE_ENABLE_STAT3
-# define SQLITE_ENABLE_STAT3_OR_STAT4 1
-#elif SQLITE_ENABLE_STAT3
-# define SQLITE_ENABLE_STAT3_OR_STAT4 1
-#elif SQLITE_ENABLE_STAT3_OR_STAT4
-# undef SQLITE_ENABLE_STAT3_OR_STAT4
-#endif
-
-/*
-** SELECTTRACE_ENABLED will be either 1 or 0 depending on whether or not
-** the Select query generator tracing logic is turned on.
-*/
-#if defined(SQLITE_DEBUG) || defined(SQLITE_ENABLE_SELECTTRACE)
-# define SELECTTRACE_ENABLED 1
-#else
-# define SELECTTRACE_ENABLED 0
-#endif
-
-/*
-** An instance of the following structure is used to store the busy-handler
-** callback for a given sqlite handle.
-**
-** The sqlite.busyHandler member of the sqlite struct contains the busy
-** callback for the database handle. Each pager opened via the sqlite
-** handle is passed a pointer to sqlite.busyHandler. The busy-handler
-** callback is currently invoked only from within pager.c.
-*/
-typedef struct BusyHandler BusyHandler;
-struct BusyHandler {
- int (*xFunc)(void *,int); /* The busy callback */
- void *pArg; /* First arg to busy callback */
- int nBusy; /* Incremented with each busy call */
-};
-
-/*
-** Name of the master database table. The master database table
-** is a special table that holds the names and attributes of all
-** user tables and indices.
-*/
-#define MASTER_NAME "sqlite_master"
-#define TEMP_MASTER_NAME "sqlite_temp_master"
-
-/*
-** The root-page of the master database table.
-*/
-#define MASTER_ROOT 1
-
-/*
-** The name of the schema table.
-*/
-#define SCHEMA_TABLE(x) ((!OMIT_TEMPDB)&&(x==1)?TEMP_MASTER_NAME:MASTER_NAME)
-
-/*
-** A convenience macro that returns the number of elements in
-** an array.
-*/
-#define ArraySize(X) ((int)(sizeof(X)/sizeof(X[0])))
-
-/*
-** Determine if the argument is a power of two
-*/
-#define IsPowerOfTwo(X) (((X)&((X)-1))==0)
-
-/*
-** The following value as a destructor means to use sqlite3DbFree().
-** The sqlite3DbFree() routine requires two parameters instead of the
-** one parameter that destructors normally want. So we have to introduce
-** this magic value that the code knows to handle differently. Any
-** pointer will work here as long as it is distinct from SQLITE_STATIC
-** and SQLITE_TRANSIENT.
-*/
-#define SQLITE_DYNAMIC ((sqlite3_destructor_type)sqlite3MallocSize)
-
-/*
-** When SQLITE_OMIT_WSD is defined, it means that the target platform does
-** not support Writable Static Data (WSD) such as global and static variables.
-** All variables must either be on the stack or dynamically allocated from
-** the heap. When WSD is unsupported, the variable declarations scattered
-** throughout the SQLite code must become constants instead. The SQLITE_WSD
-** macro is used for this purpose. And instead of referencing the variable
-** directly, we use its constant as a key to lookup the run-time allocated
-** buffer that holds real variable. The constant is also the initializer
-** for the run-time allocated buffer.
-**
-** In the usual case where WSD is supported, the SQLITE_WSD and GLOBAL
-** macros become no-ops and have zero performance impact.
-*/
-#ifdef SQLITE_OMIT_WSD
- #define SQLITE_WSD const
- #define GLOBAL(t,v) (*(t*)sqlite3_wsd_find((void*)&(v), sizeof(v)))
- #define sqlite3GlobalConfig GLOBAL(struct Sqlite3Config, sqlite3Config)
-SQLITE_API int SQLITE_STDCALL sqlite3_wsd_init(int N, int J);
-SQLITE_API void *SQLITE_STDCALL sqlite3_wsd_find(void *K, int L);
-#else
- #define SQLITE_WSD
- #define GLOBAL(t,v) v
- #define sqlite3GlobalConfig sqlite3Config
-#endif
-
-/*
-** The following macros are used to suppress compiler warnings and to
-** make it clear to human readers when a function parameter is deliberately
-** left unused within the body of a function. This usually happens when
-** a function is called via a function pointer. For example the
-** implementation of an SQL aggregate step callback may not use the
-** parameter indicating the number of arguments passed to the aggregate,
-** if it knows that this is enforced elsewhere.
-**
-** When a function parameter is not used at all within the body of a function,
-** it is generally named "NotUsed" or "NotUsed2" to make things even clearer.
-** However, these macros may also be used to suppress warnings related to
-** parameters that may or may not be used depending on compilation options.
-** For example those parameters only used in assert() statements. In these
-** cases the parameters are named as per the usual conventions.
-*/
-#define UNUSED_PARAMETER(x) (void)(x)
-#define UNUSED_PARAMETER2(x,y) UNUSED_PARAMETER(x),UNUSED_PARAMETER(y)
-
-/*
-** Forward references to structures
-*/
-typedef struct AggInfo AggInfo;
-typedef struct AuthContext AuthContext;
-typedef struct AutoincInfo AutoincInfo;
-typedef struct Bitvec Bitvec;
-typedef struct CollSeq CollSeq;
-typedef struct Column Column;
-typedef struct Db Db;
-typedef struct Schema Schema;
-typedef struct Expr Expr;
-typedef struct ExprList ExprList;
-typedef struct ExprSpan ExprSpan;
-typedef struct FKey FKey;
-typedef struct FuncDestructor FuncDestructor;
-typedef struct FuncDef FuncDef;
-typedef struct FuncDefHash FuncDefHash;
-typedef struct IdList IdList;
-typedef struct Index Index;
-typedef struct IndexSample IndexSample;
-typedef struct KeyClass KeyClass;
-typedef struct KeyInfo KeyInfo;
-typedef struct Lookaside Lookaside;
-typedef struct LookasideSlot LookasideSlot;
-typedef struct Module Module;
-typedef struct NameContext NameContext;
-typedef struct Parse Parse;
-typedef struct PreUpdate PreUpdate;
-typedef struct PrintfArguments PrintfArguments;
-typedef struct RowSet RowSet;
-typedef struct Savepoint Savepoint;
-typedef struct Select Select;
-typedef struct SQLiteThread SQLiteThread;
-typedef struct SelectDest SelectDest;
-typedef struct SrcList SrcList;
-typedef struct StrAccum StrAccum;
-typedef struct Table Table;
-typedef struct TableLock TableLock;
-typedef struct Token Token;
-typedef struct TreeView TreeView;
-typedef struct Trigger Trigger;
-typedef struct TriggerPrg TriggerPrg;
-typedef struct TriggerStep TriggerStep;
-typedef struct UnpackedRecord UnpackedRecord;
-typedef struct VTable VTable;
-typedef struct VtabCtx VtabCtx;
-typedef struct Walker Walker;
-typedef struct WhereInfo WhereInfo;
-typedef struct With With;
-
-/*
-** Defer sourcing vdbe.h and btree.h until after the "u8" and
-** "BusyHandler" typedefs. vdbe.h also requires a few of the opaque
-** pointer types (i.e. FuncDef) defined above.
-*/
-/************** Include btree.h in the middle of sqliteInt.h *****************/
-/************** Begin file btree.h *******************************************/
-/*
-** 2001 September 15
-**
-** The author disclaims copyright to this source code. In place of
-** a legal notice, here is a blessing:
-**
-** May you do good and not evil.
-** May you find forgiveness for yourself and forgive others.
-** May you share freely, never taking more than you give.
-**
-*************************************************************************
-** This header file defines the interface that the sqlite B-Tree file
-** subsystem. See comments in the source code for a detailed description
-** of what each interface routine does.
-*/
-#ifndef SQLITE_BTREE_H
-#define SQLITE_BTREE_H
-
-/* TODO: This definition is just included so other modules compile. It
-** needs to be revisited.
-*/
-#define SQLITE_N_BTREE_META 16
-
-/*
-** If defined as non-zero, auto-vacuum is enabled by default. Otherwise
-** it must be turned on for each database using "PRAGMA auto_vacuum = 1".
-*/
-#ifndef SQLITE_DEFAULT_AUTOVACUUM
- #define SQLITE_DEFAULT_AUTOVACUUM 0
-#endif
-
-#define BTREE_AUTOVACUUM_NONE 0 /* Do not do auto-vacuum */
-#define BTREE_AUTOVACUUM_FULL 1 /* Do full auto-vacuum */
-#define BTREE_AUTOVACUUM_INCR 2 /* Incremental vacuum */
-
-/*
-** Forward declarations of structure
-*/
-typedef struct Btree Btree;
-typedef struct BtCursor BtCursor;
-typedef struct BtShared BtShared;
-typedef struct BtreePayload BtreePayload;
-
-
-SQLITE_PRIVATE int sqlite3BtreeOpen(
- sqlite3_vfs *pVfs, /* VFS to use with this b-tree */
- const char *zFilename, /* Name of database file to open */
- sqlite3 *db, /* Associated database connection */
- Btree **ppBtree, /* Return open Btree* here */
- int flags, /* Flags */
- int vfsFlags /* Flags passed through to VFS open */
-);
-
-/* The flags parameter to sqlite3BtreeOpen can be the bitwise or of the
-** following values.
-**
-** NOTE: These values must match the corresponding PAGER_ values in
-** pager.h.
-*/
-#define BTREE_OMIT_JOURNAL 1 /* Do not create or use a rollback journal */
-#define BTREE_MEMORY 2 /* This is an in-memory DB */
-#define BTREE_SINGLE 4 /* The file contains at most 1 b-tree */
-#define BTREE_UNORDERED 8 /* Use of a hash implementation is OK */
-
-SQLITE_PRIVATE int sqlite3BtreeClose(Btree*);
-SQLITE_PRIVATE int sqlite3BtreeSetCacheSize(Btree*,int);
-SQLITE_PRIVATE int sqlite3BtreeSetSpillSize(Btree*,int);
-#if SQLITE_MAX_MMAP_SIZE>0
-SQLITE_PRIVATE int sqlite3BtreeSetMmapLimit(Btree*,sqlite3_int64);
-#endif
-SQLITE_PRIVATE int sqlite3BtreeSetPagerFlags(Btree*,unsigned);
-SQLITE_PRIVATE int sqlite3BtreeSetPageSize(Btree *p, int nPagesize, int nReserve, int eFix);
-SQLITE_PRIVATE int sqlite3BtreeGetPageSize(Btree*);
-SQLITE_PRIVATE int sqlite3BtreeMaxPageCount(Btree*,int);
-SQLITE_PRIVATE u32 sqlite3BtreeLastPage(Btree*);
-SQLITE_PRIVATE int sqlite3BtreeSecureDelete(Btree*,int);
-SQLITE_PRIVATE int sqlite3BtreeGetOptimalReserve(Btree*);
-SQLITE_PRIVATE int sqlite3BtreeGetReserveNoMutex(Btree *p);
-SQLITE_PRIVATE int sqlite3BtreeSetAutoVacuum(Btree *, int);
-SQLITE_PRIVATE int sqlite3BtreeGetAutoVacuum(Btree *);
-SQLITE_PRIVATE int sqlite3BtreeBeginTrans(Btree*,int);
-SQLITE_PRIVATE int sqlite3BtreeCommitPhaseOne(Btree*, const char *zMaster);
-SQLITE_PRIVATE int sqlite3BtreeCommitPhaseTwo(Btree*, int);
-SQLITE_PRIVATE int sqlite3BtreeCommit(Btree*);
-SQLITE_PRIVATE int sqlite3BtreeRollback(Btree*,int,int);
-SQLITE_PRIVATE int sqlite3BtreeBeginStmt(Btree*,int);
-SQLITE_PRIVATE int sqlite3BtreeCreateTable(Btree*, int*, int flags);
-SQLITE_PRIVATE int sqlite3BtreeIsInTrans(Btree*);
-SQLITE_PRIVATE int sqlite3BtreeIsInReadTrans(Btree*);
-SQLITE_PRIVATE int sqlite3BtreeIsInBackup(Btree*);
-SQLITE_PRIVATE void *sqlite3BtreeSchema(Btree *, int, void(*)(void *));
-SQLITE_PRIVATE int sqlite3BtreeSchemaLocked(Btree *pBtree);
-SQLITE_PRIVATE int sqlite3BtreeLockTable(Btree *pBtree, int iTab, u8 isWriteLock);
-SQLITE_PRIVATE int sqlite3BtreeSavepoint(Btree *, int, int);
-
-SQLITE_PRIVATE const char *sqlite3BtreeGetFilename(Btree *);
-SQLITE_PRIVATE const char *sqlite3BtreeGetJournalname(Btree *);
-SQLITE_PRIVATE int sqlite3BtreeCopyFile(Btree *, Btree *);
-
-SQLITE_PRIVATE int sqlite3BtreeIncrVacuum(Btree *);
-
-/* The flags parameter to sqlite3BtreeCreateTable can be the bitwise OR
-** of the flags shown below.
-**
-** Every SQLite table must have either BTREE_INTKEY or BTREE_BLOBKEY set.
-** With BTREE_INTKEY, the table key is a 64-bit integer and arbitrary data
-** is stored in the leaves. (BTREE_INTKEY is used for SQL tables.) With
-** BTREE_BLOBKEY, the key is an arbitrary BLOB and no content is stored
-** anywhere - the key is the content. (BTREE_BLOBKEY is used for SQL
-** indices.)
-*/
-#define BTREE_INTKEY 1 /* Table has only 64-bit signed integer keys */
-#define BTREE_BLOBKEY 2 /* Table has keys only - no data */
-
-SQLITE_PRIVATE int sqlite3BtreeDropTable(Btree*, int, int*);
-SQLITE_PRIVATE int sqlite3BtreeClearTable(Btree*, int, int*);
-SQLITE_PRIVATE int sqlite3BtreeClearTableOfCursor(BtCursor*);
-SQLITE_PRIVATE int sqlite3BtreeTripAllCursors(Btree*, int, int);
-
-SQLITE_PRIVATE void sqlite3BtreeGetMeta(Btree *pBtree, int idx, u32 *pValue);
-SQLITE_PRIVATE int sqlite3BtreeUpdateMeta(Btree*, int idx, u32 value);
-
-SQLITE_PRIVATE int sqlite3BtreeNewDb(Btree *p);
-
-/*
-** The second parameter to sqlite3BtreeGetMeta or sqlite3BtreeUpdateMeta
-** should be one of the following values. The integer values are assigned
-** to constants so that the offset of the corresponding field in an
-** SQLite database header may be found using the following formula:
-**
-** offset = 36 + (idx * 4)
-**
-** For example, the free-page-count field is located at byte offset 36 of
-** the database file header. The incr-vacuum-flag field is located at
-** byte offset 64 (== 36+4*7).
-**
-** The BTREE_DATA_VERSION value is not really a value stored in the header.
-** It is a read-only number computed by the pager. But we merge it with
-** the header value access routines since its access pattern is the same.
-** Call it a "virtual meta value".
-*/
-#define BTREE_FREE_PAGE_COUNT 0
-#define BTREE_SCHEMA_VERSION 1
-#define BTREE_FILE_FORMAT 2
-#define BTREE_DEFAULT_CACHE_SIZE 3
-#define BTREE_LARGEST_ROOT_PAGE 4
-#define BTREE_TEXT_ENCODING 5
-#define BTREE_USER_VERSION 6
-#define BTREE_INCR_VACUUM 7
-#define BTREE_APPLICATION_ID 8
-#define BTREE_DATA_VERSION 15 /* A virtual meta-value */
-
-/*
-** Kinds of hints that can be passed into the sqlite3BtreeCursorHint()
-** interface.
-**
-** BTREE_HINT_RANGE (arguments: Expr*, Mem*)
-**
-** The first argument is an Expr* (which is guaranteed to be constant for
-** the lifetime of the cursor) that defines constraints on which rows
-** might be fetched with this cursor. The Expr* tree may contain
-** TK_REGISTER nodes that refer to values stored in the array of registers
-** passed as the second parameter. In other words, if Expr.op==TK_REGISTER
-** then the value of the node is the value in Mem[pExpr.iTable]. Any
-** TK_COLUMN node in the expression tree refers to the Expr.iColumn-th
-** column of the b-tree of the cursor. The Expr tree will not contain
-** any function calls nor subqueries nor references to b-trees other than
-** the cursor being hinted.
-**
-** The design of the _RANGE hint is aid b-tree implementations that try
-** to prefetch content from remote machines - to provide those
-** implementations with limits on what needs to be prefetched and thereby
-** reduce network bandwidth.
-**
-** Note that BTREE_HINT_FLAGS with BTREE_BULKLOAD is the only hint used by
-** standard SQLite. The other hints are provided for extentions that use
-** the SQLite parser and code generator but substitute their own storage
-** engine.
-*/
-#define BTREE_HINT_RANGE 0 /* Range constraints on queries */
-
-/*
-** Values that may be OR'd together to form the argument to the
-** BTREE_HINT_FLAGS hint for sqlite3BtreeCursorHint():
-**
-** The BTREE_BULKLOAD flag is set on index cursors when the index is going
-** to be filled with content that is already in sorted order.
-**
-** The BTREE_SEEK_EQ flag is set on cursors that will get OP_SeekGE or
-** OP_SeekLE opcodes for a range search, but where the range of entries
-** selected will all have the same key. In other words, the cursor will
-** be used only for equality key searches.
-**
-*/
-#define BTREE_BULKLOAD 0x00000001 /* Used to full index in sorted order */
-#define BTREE_SEEK_EQ 0x00000002 /* EQ seeks only - no range seeks */
-
-/*
-** Flags passed as the third argument to sqlite3BtreeCursor().
-**
-** For read-only cursors the wrFlag argument is always zero. For read-write
-** cursors it may be set to either (BTREE_WRCSR|BTREE_FORDELETE) or just
-** (BTREE_WRCSR). If the BTREE_FORDELETE bit is set, then the cursor will
-** only be used by SQLite for the following:
-**
-** * to seek to and then delete specific entries, and/or
-**
-** * to read values that will be used to create keys that other
-** BTREE_FORDELETE cursors will seek to and delete.
-**
-** The BTREE_FORDELETE flag is an optimization hint. It is not used by
-** by this, the native b-tree engine of SQLite, but it is available to
-** alternative storage engines that might be substituted in place of this
-** b-tree system. For alternative storage engines in which a delete of
-** the main table row automatically deletes corresponding index rows,
-** the FORDELETE flag hint allows those alternative storage engines to
-** skip a lot of work. Namely: FORDELETE cursors may treat all SEEK
-** and DELETE operations as no-ops, and any READ operation against a
-** FORDELETE cursor may return a null row: 0x01 0x00.
-*/
-#define BTREE_WRCSR 0x00000004 /* read-write cursor */
-#define BTREE_FORDELETE 0x00000008 /* Cursor is for seek/delete only */
-
-SQLITE_PRIVATE int sqlite3BtreeCursor(
- Btree*, /* BTree containing table to open */
- int iTable, /* Index of root page */
- int wrFlag, /* 1 for writing. 0 for read-only */
- struct KeyInfo*, /* First argument to compare function */
- BtCursor *pCursor /* Space to write cursor structure */
-);
-SQLITE_PRIVATE int sqlite3BtreeCursorSize(void);
-SQLITE_PRIVATE void sqlite3BtreeCursorZero(BtCursor*);
-SQLITE_PRIVATE void sqlite3BtreeCursorHintFlags(BtCursor*, unsigned);
-#ifdef SQLITE_ENABLE_CURSOR_HINTS
-SQLITE_PRIVATE void sqlite3BtreeCursorHint(BtCursor*, int, ...);
-#endif
-
-SQLITE_PRIVATE int sqlite3BtreeCloseCursor(BtCursor*);
-SQLITE_PRIVATE int sqlite3BtreeMovetoUnpacked(
- BtCursor*,
- UnpackedRecord *pUnKey,
- i64 intKey,
- int bias,
- int *pRes
-);
-SQLITE_PRIVATE int sqlite3BtreeCursorHasMoved(BtCursor*);
-SQLITE_PRIVATE int sqlite3BtreeCursorRestore(BtCursor*, int*);
-SQLITE_PRIVATE int sqlite3BtreeDelete(BtCursor*, u8 flags);
-
-/* Allowed flags for the 2nd argument to sqlite3BtreeDelete() */
-#define BTREE_SAVEPOSITION 0x02 /* Leave cursor pointing at NEXT or PREV */
-#define BTREE_AUXDELETE 0x04 /* not the primary delete operation */
-
-/* An instance of the BtreePayload object describes the content of a single
-** entry in either an index or table btree.
-**
-** Index btrees (used for indexes and also WITHOUT ROWID tables) contain
-** an arbitrary key and no data. These btrees have pKey,nKey set to their
-** key and pData,nData,nZero set to zero.
-**
-** Table btrees (used for rowid tables) contain an integer rowid used as
-** the key and passed in the nKey field. The pKey field is zero.
-** pData,nData hold the content of the new entry. nZero extra zero bytes
-** are appended to the end of the content when constructing the entry.
-**
-** This object is used to pass information into sqlite3BtreeInsert(). The
-** same information used to be passed as five separate parameters. But placing
-** the information into this object helps to keep the interface more
-** organized and understandable, and it also helps the resulting code to
-** run a little faster by using fewer registers for parameter passing.
-*/
-struct BtreePayload {
- const void *pKey; /* Key content for indexes. NULL for tables */
- sqlite3_int64 nKey; /* Size of pKey for indexes. PRIMARY KEY for tabs */
- const void *pData; /* Data for tables. NULL for indexes */
- int nData; /* Size of pData. 0 if none. */
- int nZero; /* Extra zero data appended after pData,nData */
-};
-
-SQLITE_PRIVATE int sqlite3BtreeInsert(BtCursor*, const BtreePayload *pPayload,
- int bias, int seekResult);
-SQLITE_PRIVATE int sqlite3BtreeFirst(BtCursor*, int *pRes);
-SQLITE_PRIVATE int sqlite3BtreeLast(BtCursor*, int *pRes);
-SQLITE_PRIVATE int sqlite3BtreeNext(BtCursor*, int *pRes);
-SQLITE_PRIVATE int sqlite3BtreeEof(BtCursor*);
-SQLITE_PRIVATE int sqlite3BtreePrevious(BtCursor*, int *pRes);
-SQLITE_PRIVATE i64 sqlite3BtreeIntegerKey(BtCursor*);
-SQLITE_PRIVATE int sqlite3BtreeKey(BtCursor*, u32 offset, u32 amt, void*);
-SQLITE_PRIVATE const void *sqlite3BtreePayloadFetch(BtCursor*, u32 *pAmt);
-SQLITE_PRIVATE u32 sqlite3BtreePayloadSize(BtCursor*);
-SQLITE_PRIVATE int sqlite3BtreeData(BtCursor*, u32 offset, u32 amt, void*);
-
-SQLITE_PRIVATE char *sqlite3BtreeIntegrityCheck(Btree*, int *aRoot, int nRoot, int, int*);
-SQLITE_PRIVATE struct Pager *sqlite3BtreePager(Btree*);
-
-SQLITE_PRIVATE int sqlite3BtreePutData(BtCursor*, u32 offset, u32 amt, void*);
-SQLITE_PRIVATE void sqlite3BtreeIncrblobCursor(BtCursor *);
-SQLITE_PRIVATE void sqlite3BtreeClearCursor(BtCursor *);
-SQLITE_PRIVATE int sqlite3BtreeSetVersion(Btree *pBt, int iVersion);
-SQLITE_PRIVATE int sqlite3BtreeCursorHasHint(BtCursor*, unsigned int mask);
-SQLITE_PRIVATE int sqlite3BtreeIsReadonly(Btree *pBt);
-SQLITE_PRIVATE int sqlite3HeaderSizeBtree(void);
-
-#ifndef NDEBUG
-SQLITE_PRIVATE int sqlite3BtreeCursorIsValid(BtCursor*);
-#endif
-
-#ifndef SQLITE_OMIT_BTREECOUNT
-SQLITE_PRIVATE int sqlite3BtreeCount(BtCursor *, i64 *);
-#endif
-
-#ifdef SQLITE_TEST
-SQLITE_PRIVATE int sqlite3BtreeCursorInfo(BtCursor*, int*, int);
-SQLITE_PRIVATE void sqlite3BtreeCursorList(Btree*);
-#endif
-
-#ifndef SQLITE_OMIT_WAL
-SQLITE_PRIVATE int sqlite3BtreeCheckpoint(Btree*, int, int *, int *);
-#endif
-
-/*
-** If we are not using shared cache, then there is no need to
-** use mutexes to access the BtShared structures. So make the
-** Enter and Leave procedures no-ops.
-*/
-#ifndef SQLITE_OMIT_SHARED_CACHE
-SQLITE_PRIVATE void sqlite3BtreeEnter(Btree*);
-SQLITE_PRIVATE void sqlite3BtreeEnterAll(sqlite3*);
-SQLITE_PRIVATE int sqlite3BtreeSharable(Btree*);
-SQLITE_PRIVATE void sqlite3BtreeEnterCursor(BtCursor*);
-SQLITE_PRIVATE int sqlite3BtreeConnectionCount(Btree*);
-#else
-# define sqlite3BtreeEnter(X)
-# define sqlite3BtreeEnterAll(X)
-# define sqlite3BtreeSharable(X) 0
-# define sqlite3BtreeEnterCursor(X)
-# define sqlite3BtreeConnectionCount(X) 1
-#endif
-
-#if !defined(SQLITE_OMIT_SHARED_CACHE) && SQLITE_THREADSAFE
-SQLITE_PRIVATE void sqlite3BtreeLeave(Btree*);
-SQLITE_PRIVATE void sqlite3BtreeLeaveCursor(BtCursor*);
-SQLITE_PRIVATE void sqlite3BtreeLeaveAll(sqlite3*);
-#ifndef NDEBUG
- /* These routines are used inside assert() statements only. */
-SQLITE_PRIVATE int sqlite3BtreeHoldsMutex(Btree*);
-SQLITE_PRIVATE int sqlite3BtreeHoldsAllMutexes(sqlite3*);
-SQLITE_PRIVATE int sqlite3SchemaMutexHeld(sqlite3*,int,Schema*);
-#endif
-#else
-
-# define sqlite3BtreeLeave(X)
-# define sqlite3BtreeLeaveCursor(X)
-# define sqlite3BtreeLeaveAll(X)
-
-# define sqlite3BtreeHoldsMutex(X) 1
-# define sqlite3BtreeHoldsAllMutexes(X) 1
-# define sqlite3SchemaMutexHeld(X,Y,Z) 1
-#endif
-
-
-#endif /* SQLITE_BTREE_H */
-
-/************** End of btree.h ***********************************************/
-/************** Continuing where we left off in sqliteInt.h ******************/
-/************** Include vdbe.h in the middle of sqliteInt.h ******************/
-/************** Begin file vdbe.h ********************************************/
-/*
-** 2001 September 15
-**
-** The author disclaims copyright to this source code. In place of
-** a legal notice, here is a blessing:
-**
-** May you do good and not evil.
-** May you find forgiveness for yourself and forgive others.
-** May you share freely, never taking more than you give.
-**
-*************************************************************************
-** Header file for the Virtual DataBase Engine (VDBE)
-**
-** This header defines the interface to the virtual database engine
-** or VDBE. The VDBE implements an abstract machine that runs a
-** simple program to access and modify the underlying database.
-*/
-#ifndef SQLITE_VDBE_H
-#define SQLITE_VDBE_H
-/* #include */
-
-/*
-** A single VDBE is an opaque structure named "Vdbe". Only routines
-** in the source file sqliteVdbe.c are allowed to see the insides
-** of this structure.
-*/
-typedef struct Vdbe Vdbe;
-
-/*
-** The names of the following types declared in vdbeInt.h are required
-** for the VdbeOp definition.
-*/
-typedef struct Mem Mem;
-typedef struct SubProgram SubProgram;
-
-/*
-** A single instruction of the virtual machine has an opcode
-** and as many as three operands. The instruction is recorded
-** as an instance of the following structure:
-*/
-struct VdbeOp {
- u8 opcode; /* What operation to perform */
- signed char p4type; /* One of the P4_xxx constants for p4 */
- u8 notUsed1;
- u8 p5; /* Fifth parameter is an unsigned character */
- int p1; /* First operand */
- int p2; /* Second parameter (often the jump destination) */
- int p3; /* The third parameter */
- union p4union { /* fourth parameter */
- int i; /* Integer value if p4type==P4_INT32 */
- void *p; /* Generic pointer */
- char *z; /* Pointer to data for string (char array) types */
- i64 *pI64; /* Used when p4type is P4_INT64 */
- double *pReal; /* Used when p4type is P4_REAL */
- FuncDef *pFunc; /* Used when p4type is P4_FUNCDEF */
- sqlite3_context *pCtx; /* Used when p4type is P4_FUNCCTX */
- CollSeq *pColl; /* Used when p4type is P4_COLLSEQ */
- Mem *pMem; /* Used when p4type is P4_MEM */
- VTable *pVtab; /* Used when p4type is P4_VTAB */
- KeyInfo *pKeyInfo; /* Used when p4type is P4_KEYINFO */
- int *ai; /* Used when p4type is P4_INTARRAY */
- SubProgram *pProgram; /* Used when p4type is P4_SUBPROGRAM */
- Table *pTab; /* Used when p4type is P4_TABLE */
-#ifdef SQLITE_ENABLE_CURSOR_HINTS
- Expr *pExpr; /* Used when p4type is P4_EXPR */
-#endif
- int (*xAdvance)(BtCursor *, int *);
- } p4;
-#ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS
- char *zComment; /* Comment to improve readability */
-#endif
-#ifdef VDBE_PROFILE
- u32 cnt; /* Number of times this instruction was executed */
- u64 cycles; /* Total time spent executing this instruction */
-#endif
-#ifdef SQLITE_VDBE_COVERAGE
- int iSrcLine; /* Source-code line that generated this opcode */
-#endif
-};
-typedef struct VdbeOp VdbeOp;
-
-
-/*
-** A sub-routine used to implement a trigger program.
-*/
-struct SubProgram {
- VdbeOp *aOp; /* Array of opcodes for sub-program */
- int nOp; /* Elements in aOp[] */
- int nMem; /* Number of memory cells required */
- int nCsr; /* Number of cursors required */
- int nOnce; /* Number of OP_Once instructions */
- void *token; /* id that may be used to recursive triggers */
- SubProgram *pNext; /* Next sub-program already visited */
-};
-
-/*
-** A smaller version of VdbeOp used for the VdbeAddOpList() function because
-** it takes up less space.
-*/
-struct VdbeOpList {
- u8 opcode; /* What operation to perform */
- signed char p1; /* First operand */
- signed char p2; /* Second parameter (often the jump destination) */
- signed char p3; /* Third parameter */
-};
-typedef struct VdbeOpList VdbeOpList;
-
-/*
-** Allowed values of VdbeOp.p4type
-*/
-#define P4_NOTUSED 0 /* The P4 parameter is not used */
-#define P4_DYNAMIC (-1) /* Pointer to a string obtained from sqliteMalloc() */
-#define P4_STATIC (-2) /* Pointer to a static string */
-#define P4_COLLSEQ (-4) /* P4 is a pointer to a CollSeq structure */
-#define P4_FUNCDEF (-5) /* P4 is a pointer to a FuncDef structure */
-#define P4_KEYINFO (-6) /* P4 is a pointer to a KeyInfo structure */
-#define P4_EXPR (-7) /* P4 is a pointer to an Expr tree */
-#define P4_MEM (-8) /* P4 is a pointer to a Mem* structure */
-#define P4_TRANSIENT 0 /* P4 is a pointer to a transient string */
-#define P4_VTAB (-10) /* P4 is a pointer to an sqlite3_vtab structure */
-#define P4_MPRINTF (-11) /* P4 is a string obtained from sqlite3_mprintf() */
-#define P4_REAL (-12) /* P4 is a 64-bit floating point value */
-#define P4_INT64 (-13) /* P4 is a 64-bit signed integer */
-#define P4_INT32 (-14) /* P4 is a 32-bit signed integer */
-#define P4_INTARRAY (-15) /* P4 is a vector of 32-bit integers */
-#define P4_SUBPROGRAM (-18) /* P4 is a pointer to a SubProgram structure */
-#define P4_ADVANCE (-19) /* P4 is a pointer to BtreeNext() or BtreePrev() */
-#define P4_TABLE (-20) /* P4 is a pointer to a Table structure */
-#define P4_FUNCCTX (-21) /* P4 is a pointer to an sqlite3_context object */
-
-/* Error message codes for OP_Halt */
-#define P5_ConstraintNotNull 1
-#define P5_ConstraintUnique 2
-#define P5_ConstraintCheck 3
-#define P5_ConstraintFK 4
-
-/*
-** The Vdbe.aColName array contains 5n Mem structures, where n is the
-** number of columns of data returned by the statement.
-*/
-#define COLNAME_NAME 0
-#define COLNAME_DECLTYPE 1
-#define COLNAME_DATABASE 2
-#define COLNAME_TABLE 3
-#define COLNAME_COLUMN 4
-#ifdef SQLITE_ENABLE_COLUMN_METADATA
-# define COLNAME_N 5 /* Number of COLNAME_xxx symbols */
-#else
-# ifdef SQLITE_OMIT_DECLTYPE
-# define COLNAME_N 1 /* Store only the name */
-# else
-# define COLNAME_N 2 /* Store the name and decltype */
-# endif
-#endif
-
-/*
-** The following macro converts a relative address in the p2 field
-** of a VdbeOp structure into a negative number so that
-** sqlite3VdbeAddOpList() knows that the address is relative. Calling
-** the macro again restores the address.
-*/
-#define ADDR(X) (-1-(X))
-
-/*
-** The makefile scans the vdbe.c source file and creates the "opcodes.h"
-** header file that defines a number for each opcode used by the VDBE.
-*/
-/************** Include opcodes.h in the middle of vdbe.h ********************/
-/************** Begin file opcodes.h *****************************************/
-/* Automatically generated. Do not edit */
-/* See the tool/mkopcodeh.tcl script for details */
-#define OP_Savepoint 0
-#define OP_AutoCommit 1
-#define OP_Transaction 2
-#define OP_SorterNext 3
-#define OP_PrevIfOpen 4
-#define OP_NextIfOpen 5
-#define OP_Prev 6
-#define OP_Next 7
-#define OP_Checkpoint 8
-#define OP_JournalMode 9
-#define OP_Vacuum 10
-#define OP_VFilter 11 /* synopsis: iplan=r[P3] zplan='P4' */
-#define OP_VUpdate 12 /* synopsis: data=r[P3@P2] */
-#define OP_Goto 13
-#define OP_Gosub 14
-#define OP_InitCoroutine 15
-#define OP_Yield 16
-#define OP_MustBeInt 17
-#define OP_Jump 18
-#define OP_Not 19 /* same as TK_NOT, synopsis: r[P2]= !r[P1] */
-#define OP_Once 20
-#define OP_If 21
-#define OP_IfNot 22
-#define OP_SeekLT 23 /* synopsis: key=r[P3@P4] */
-#define OP_SeekLE 24 /* synopsis: key=r[P3@P4] */
-#define OP_SeekGE 25 /* synopsis: key=r[P3@P4] */
-#define OP_SeekGT 26 /* synopsis: key=r[P3@P4] */
-#define OP_Or 27 /* same as TK_OR, synopsis: r[P3]=(r[P1] || r[P2]) */
-#define OP_And 28 /* same as TK_AND, synopsis: r[P3]=(r[P1] && r[P2]) */
-#define OP_NoConflict 29 /* synopsis: key=r[P3@P4] */
-#define OP_NotFound 30 /* synopsis: key=r[P3@P4] */
-#define OP_Found 31 /* synopsis: key=r[P3@P4] */
-#define OP_SeekRowid 32 /* synopsis: intkey=r[P3] */
-#define OP_NotExists 33 /* synopsis: intkey=r[P3] */
-#define OP_IsNull 34 /* same as TK_ISNULL, synopsis: if r[P1]==NULL goto P2 */
-#define OP_NotNull 35 /* same as TK_NOTNULL, synopsis: if r[P1]!=NULL goto P2 */
-#define OP_Ne 36 /* same as TK_NE, synopsis: if r[P1]!=r[P3] goto P2 */
-#define OP_Eq 37 /* same as TK_EQ, synopsis: if r[P1]==r[P3] goto P2 */
-#define OP_Gt 38 /* same as TK_GT, synopsis: if r[P1]>r[P3] goto P2 */
-#define OP_Le 39 /* same as TK_LE, synopsis: if r[P1]<=r[P3] goto P2 */
-#define OP_Lt 40 /* same as TK_LT, synopsis: if r[P1]=r[P3] goto P2 */
-#define OP_Last 42
-#define OP_BitAnd 43 /* same as TK_BITAND, synopsis: r[P3]=r[P1]&r[P2] */
-#define OP_BitOr 44 /* same as TK_BITOR, synopsis: r[P3]=r[P1]|r[P2] */
-#define OP_ShiftLeft 45 /* same as TK_LSHIFT, synopsis: r[P3]=r[P2]<>r[P1] */
-#define OP_Add 47 /* same as TK_PLUS, synopsis: r[P3]=r[P1]+r[P2] */
-#define OP_Subtract 48 /* same as TK_MINUS, synopsis: r[P3]=r[P2]-r[P1] */
-#define OP_Multiply 49 /* same as TK_STAR, synopsis: r[P3]=r[P1]*r[P2] */
-#define OP_Divide 50 /* same as TK_SLASH, synopsis: r[P3]=r[P2]/r[P1] */
-#define OP_Remainder 51 /* same as TK_REM, synopsis: r[P3]=r[P2]%r[P1] */
-#define OP_Concat 52 /* same as TK_CONCAT, synopsis: r[P3]=r[P2]+r[P1] */
-#define OP_SorterSort 53
-#define OP_BitNot 54 /* same as TK_BITNOT, synopsis: r[P1]= ~r[P1] */
-#define OP_Sort 55
-#define OP_Rewind 56
-#define OP_IdxLE 57 /* synopsis: key=r[P3@P4] */
-#define OP_IdxGT 58 /* synopsis: key=r[P3@P4] */
-#define OP_IdxLT 59 /* synopsis: key=r[P3@P4] */
-#define OP_IdxGE 60 /* synopsis: key=r[P3@P4] */
-#define OP_RowSetRead 61 /* synopsis: r[P3]=rowset(P1) */
-#define OP_RowSetTest 62 /* synopsis: if r[P3] in rowset(P1) goto P2 */
-#define OP_Program 63
-#define OP_FkIfZero 64 /* synopsis: if fkctr[P1]==0 goto P2 */
-#define OP_IfPos 65 /* synopsis: if r[P1]>0 then r[P1]-=P3, goto P2 */
-#define OP_IfNotZero 66 /* synopsis: if r[P1]!=0 then r[P1]-=P3, goto P2 */
-#define OP_DecrJumpZero 67 /* synopsis: if (--r[P1])==0 goto P2 */
-#define OP_IncrVacuum 68
-#define OP_VNext 69
-#define OP_Init 70 /* synopsis: Start at P2 */
-#define OP_Return 71
-#define OP_EndCoroutine 72
-#define OP_HaltIfNull 73 /* synopsis: if r[P3]=null halt */
-#define OP_Halt 74
-#define OP_Integer 75 /* synopsis: r[P2]=P1 */
-#define OP_Int64 76 /* synopsis: r[P2]=P4 */
-#define OP_String 77 /* synopsis: r[P2]='P4' (len=P1) */
-#define OP_Null 78 /* synopsis: r[P2..P3]=NULL */
-#define OP_SoftNull 79 /* synopsis: r[P1]=NULL */
-#define OP_Blob 80 /* synopsis: r[P2]=P4 (len=P1) */
-#define OP_Variable 81 /* synopsis: r[P2]=parameter(P1,P4) */
-#define OP_Move 82 /* synopsis: r[P2@P3]=r[P1@P3] */
-#define OP_Copy 83 /* synopsis: r[P2@P3+1]=r[P1@P3+1] */
-#define OP_SCopy 84 /* synopsis: r[P2]=r[P1] */
-#define OP_IntCopy 85 /* synopsis: r[P2]=r[P1] */
-#define OP_ResultRow 86 /* synopsis: output=r[P1@P2] */
-#define OP_CollSeq 87
-#define OP_Function0 88 /* synopsis: r[P3]=func(r[P2@P5]) */
-#define OP_Function 89 /* synopsis: r[P3]=func(r[P2@P5]) */
-#define OP_AddImm 90 /* synopsis: r[P1]=r[P1]+P2 */
-#define OP_RealAffinity 91
-#define OP_Cast 92 /* synopsis: affinity(r[P1]) */
-#define OP_Permutation 93
-#define OP_Compare 94 /* synopsis: r[P1@P3] <-> r[P2@P3] */
-#define OP_Column 95 /* synopsis: r[P3]=PX */
-#define OP_Affinity 96 /* synopsis: affinity(r[P1@P2]) */
-#define OP_String8 97 /* same as TK_STRING, synopsis: r[P2]='P4' */
-#define OP_MakeRecord 98 /* synopsis: r[P3]=mkrec(r[P1@P2]) */
-#define OP_Count 99 /* synopsis: r[P2]=count() */
-#define OP_ReadCookie 100
-#define OP_SetCookie 101
-#define OP_ReopenIdx 102 /* synopsis: root=P2 iDb=P3 */
-#define OP_OpenRead 103 /* synopsis: root=P2 iDb=P3 */
-#define OP_OpenWrite 104 /* synopsis: root=P2 iDb=P3 */
-#define OP_OpenAutoindex 105 /* synopsis: nColumn=P2 */
-#define OP_OpenEphemeral 106 /* synopsis: nColumn=P2 */
-#define OP_SorterOpen 107
-#define OP_SequenceTest 108 /* synopsis: if( cursor[P1].ctr++ ) pc = P2 */
-#define OP_OpenPseudo 109 /* synopsis: P3 columns in r[P2] */
-#define OP_Close 110
-#define OP_ColumnsUsed 111
-#define OP_Sequence 112 /* synopsis: r[P2]=cursor[P1].ctr++ */
-#define OP_NewRowid 113 /* synopsis: r[P2]=rowid */
-#define OP_Insert 114 /* synopsis: intkey=r[P3] data=r[P2] */
-#define OP_InsertInt 115 /* synopsis: intkey=P3 data=r[P2] */
-#define OP_Delete 116
-#define OP_ResetCount 117
-#define OP_SorterCompare 118 /* synopsis: if key(P1)!=trim(r[P3],P4) goto P2 */
-#define OP_SorterData 119 /* synopsis: r[P2]=data */
-#define OP_RowKey 120 /* synopsis: r[P2]=key */
-#define OP_RowData 121 /* synopsis: r[P2]=data */
-#define OP_Rowid 122 /* synopsis: r[P2]=rowid */
-#define OP_NullRow 123
-#define OP_SorterInsert 124
-#define OP_IdxInsert 125 /* synopsis: key=r[P2] */
-#define OP_IdxDelete 126 /* synopsis: key=r[P2@P3] */
-#define OP_Seek 127 /* synopsis: Move P3 to P1.rowid */
-#define OP_IdxRowid 128 /* synopsis: r[P2]=rowid */
-#define OP_Destroy 129
-#define OP_Clear 130
-#define OP_ResetSorter 131
-#define OP_CreateIndex 132 /* synopsis: r[P2]=root iDb=P1 */
-#define OP_Real 133 /* same as TK_FLOAT, synopsis: r[P2]=P4 */
-#define OP_CreateTable 134 /* synopsis: r[P2]=root iDb=P1 */
-#define OP_ParseSchema 135
-#define OP_LoadAnalysis 136
-#define OP_DropTable 137
-#define OP_DropIndex 138
-#define OP_DropTrigger 139
-#define OP_IntegrityCk 140
-#define OP_RowSetAdd 141 /* synopsis: rowset(P1)=r[P2] */
-#define OP_Param 142
-#define OP_FkCounter 143 /* synopsis: fkctr[P1]+=P2 */
-#define OP_MemMax 144 /* synopsis: r[P1]=max(r[P1],r[P2]) */
-#define OP_OffsetLimit 145 /* synopsis: if r[P1]>0 then r[P2]=r[P1]+max(0,r[P3]) else r[P2]=(-1) */
-#define OP_AggStep0 146 /* synopsis: accum=r[P3] step(r[P2@P5]) */
-#define OP_AggStep 147 /* synopsis: accum=r[P3] step(r[P2@P5]) */
-#define OP_AggFinal 148 /* synopsis: accum=r[P1] N=P2 */
-#define OP_Expire 149
-#define OP_TableLock 150 /* synopsis: iDb=P1 root=P2 write=P3 */
-#define OP_VBegin 151
-#define OP_VCreate 152
-#define OP_VDestroy 153
-#define OP_VOpen 154
-#define OP_VColumn 155 /* synopsis: r[P3]=vcolumn(P2) */
-#define OP_VRename 156
-#define OP_Pagecount 157
-#define OP_MaxPgcnt 158
-#define OP_CursorHint 159
-#define OP_Noop 160
-#define OP_Explain 161
-
-/* Properties such as "out2" or "jump" that are specified in
-** comments following the "case" for each opcode in the vdbe.c
-** are encoded into bitvectors as follows:
-*/
-#define OPFLG_JUMP 0x01 /* jump: P2 holds jmp target */
-#define OPFLG_IN1 0x02 /* in1: P1 is an input */
-#define OPFLG_IN2 0x04 /* in2: P2 is an input */
-#define OPFLG_IN3 0x08 /* in3: P3 is an input */
-#define OPFLG_OUT2 0x10 /* out2: P2 is an output */
-#define OPFLG_OUT3 0x20 /* out3: P3 is an output */
-#define OPFLG_INITIALIZER {\
-/* 0 */ 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01,\
-/* 8 */ 0x00, 0x10, 0x00, 0x01, 0x00, 0x01, 0x01, 0x01,\
-/* 16 */ 0x03, 0x03, 0x01, 0x12, 0x01, 0x03, 0x03, 0x09,\
-/* 24 */ 0x09, 0x09, 0x09, 0x26, 0x26, 0x09, 0x09, 0x09,\
-/* 32 */ 0x09, 0x09, 0x03, 0x03, 0x0b, 0x0b, 0x0b, 0x0b,\
-/* 40 */ 0x0b, 0x0b, 0x01, 0x26, 0x26, 0x26, 0x26, 0x26,\
-/* 48 */ 0x26, 0x26, 0x26, 0x26, 0x26, 0x01, 0x12, 0x01,\
-/* 56 */ 0x01, 0x01, 0x01, 0x01, 0x01, 0x23, 0x0b, 0x01,\
-/* 64 */ 0x01, 0x03, 0x03, 0x03, 0x01, 0x01, 0x01, 0x02,\
-/* 72 */ 0x02, 0x08, 0x00, 0x10, 0x10, 0x10, 0x10, 0x00,\
-/* 80 */ 0x10, 0x10, 0x00, 0x00, 0x10, 0x10, 0x00, 0x00,\
-/* 88 */ 0x00, 0x00, 0x02, 0x02, 0x02, 0x00, 0x00, 0x00,\
-/* 96 */ 0x00, 0x10, 0x00, 0x10, 0x10, 0x00, 0x00, 0x00,\
-/* 104 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\
-/* 112 */ 0x10, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\
-/* 120 */ 0x00, 0x00, 0x10, 0x00, 0x04, 0x04, 0x00, 0x00,\
-/* 128 */ 0x10, 0x10, 0x00, 0x00, 0x10, 0x10, 0x10, 0x00,\
-/* 136 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x10, 0x00,\
-/* 144 */ 0x04, 0x1a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\
-/* 152 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x00,\
-/* 160 */ 0x00, 0x00,}
-
-/* The sqlite3P2Values() routine is able to run faster if it knows
-** the value of the largest JUMP opcode. The smaller the maximum
-** JUMP opcode the better, so the mkopcodeh.tcl script that
-** generated this include file strives to group all JUMP opcodes
-** together near the beginning of the list.
-*/
-#define SQLITE_MX_JUMP_OPCODE 70 /* Maximum JUMP opcode */
-
-/************** End of opcodes.h *********************************************/
-/************** Continuing where we left off in vdbe.h ***********************/
-
-/*
-** Prototypes for the VDBE interface. See comments on the implementation
-** for a description of what each of these routines does.
-*/
-SQLITE_PRIVATE Vdbe *sqlite3VdbeCreate(Parse*);
-SQLITE_PRIVATE int sqlite3VdbeAddOp0(Vdbe*,int);
-SQLITE_PRIVATE int sqlite3VdbeAddOp1(Vdbe*,int,int);
-SQLITE_PRIVATE int sqlite3VdbeAddOp2(Vdbe*,int,int,int);
-SQLITE_PRIVATE int sqlite3VdbeGoto(Vdbe*,int);
-SQLITE_PRIVATE int sqlite3VdbeLoadString(Vdbe*,int,const char*);
-SQLITE_PRIVATE void sqlite3VdbeMultiLoad(Vdbe*,int,const char*,...);
-SQLITE_PRIVATE int sqlite3VdbeAddOp3(Vdbe*,int,int,int,int);
-SQLITE_PRIVATE int sqlite3VdbeAddOp4(Vdbe*,int,int,int,int,const char *zP4,int);
-SQLITE_PRIVATE int sqlite3VdbeAddOp4Dup8(Vdbe*,int,int,int,int,const u8*,int);
-SQLITE_PRIVATE int sqlite3VdbeAddOp4Int(Vdbe*,int,int,int,int,int);
-SQLITE_PRIVATE void sqlite3VdbeEndCoroutine(Vdbe*,int);
-#if defined(SQLITE_DEBUG) && !defined(SQLITE_TEST_REALLOC_STRESS)
-SQLITE_PRIVATE void sqlite3VdbeVerifyNoMallocRequired(Vdbe *p, int N);
-#else
-# define sqlite3VdbeVerifyNoMallocRequired(A,B)
-#endif
-SQLITE_PRIVATE VdbeOp *sqlite3VdbeAddOpList(Vdbe*, int nOp, VdbeOpList const *aOp, int iLineno);
-SQLITE_PRIVATE void sqlite3VdbeAddParseSchemaOp(Vdbe*,int,char*);
-SQLITE_PRIVATE void sqlite3VdbeChangeOpcode(Vdbe*, u32 addr, u8);
-SQLITE_PRIVATE void sqlite3VdbeChangeP1(Vdbe*, u32 addr, int P1);
-SQLITE_PRIVATE void sqlite3VdbeChangeP2(Vdbe*, u32 addr, int P2);
-SQLITE_PRIVATE void sqlite3VdbeChangeP3(Vdbe*, u32 addr, int P3);
-SQLITE_PRIVATE void sqlite3VdbeChangeP5(Vdbe*, u8 P5);
-SQLITE_PRIVATE void sqlite3VdbeJumpHere(Vdbe*, int addr);
-SQLITE_PRIVATE int sqlite3VdbeChangeToNoop(Vdbe*, int addr);
-SQLITE_PRIVATE int sqlite3VdbeDeletePriorOpcode(Vdbe*, u8 op);
-SQLITE_PRIVATE void sqlite3VdbeChangeP4(Vdbe*, int addr, const char *zP4, int N);
-SQLITE_PRIVATE void sqlite3VdbeSetP4KeyInfo(Parse*, Index*);
-SQLITE_PRIVATE void sqlite3VdbeUsesBtree(Vdbe*, int);
-SQLITE_PRIVATE VdbeOp *sqlite3VdbeGetOp(Vdbe*, int);
-SQLITE_PRIVATE int sqlite3VdbeMakeLabel(Vdbe*);
-SQLITE_PRIVATE void sqlite3VdbeRunOnlyOnce(Vdbe*);
-SQLITE_PRIVATE void sqlite3VdbeReusable(Vdbe*);
-SQLITE_PRIVATE void sqlite3VdbeDelete(Vdbe*);
-SQLITE_PRIVATE void sqlite3VdbeClearObject(sqlite3*,Vdbe*);
-SQLITE_PRIVATE void sqlite3VdbeMakeReady(Vdbe*,Parse*);
-SQLITE_PRIVATE int sqlite3VdbeFinalize(Vdbe*);
-SQLITE_PRIVATE void sqlite3VdbeResolveLabel(Vdbe*, int);
-SQLITE_PRIVATE int sqlite3VdbeCurrentAddr(Vdbe*);
-#ifdef SQLITE_DEBUG
-SQLITE_PRIVATE int sqlite3VdbeAssertMayAbort(Vdbe *, int);
-#endif
-SQLITE_PRIVATE void sqlite3VdbeResetStepResult(Vdbe*);
-SQLITE_PRIVATE void sqlite3VdbeRewind(Vdbe*);
-SQLITE_PRIVATE int sqlite3VdbeReset(Vdbe*);
-SQLITE_PRIVATE void sqlite3VdbeSetNumCols(Vdbe*,int);
-SQLITE_PRIVATE int sqlite3VdbeSetColName(Vdbe*, int, int, const char *, void(*)(void*));
-SQLITE_PRIVATE void sqlite3VdbeCountChanges(Vdbe*);
-SQLITE_PRIVATE sqlite3 *sqlite3VdbeDb(Vdbe*);
-SQLITE_PRIVATE void sqlite3VdbeSetSql(Vdbe*, const char *z, int n, int);
-SQLITE_PRIVATE void sqlite3VdbeSwap(Vdbe*,Vdbe*);
-SQLITE_PRIVATE VdbeOp *sqlite3VdbeTakeOpArray(Vdbe*, int*, int*);
-SQLITE_PRIVATE sqlite3_value *sqlite3VdbeGetBoundValue(Vdbe*, int, u8);
-SQLITE_PRIVATE void sqlite3VdbeSetVarmask(Vdbe*, int);
-#ifndef SQLITE_OMIT_TRACE
-SQLITE_PRIVATE char *sqlite3VdbeExpandSql(Vdbe*, const char*);
-#endif
-SQLITE_PRIVATE int sqlite3MemCompare(const Mem*, const Mem*, const CollSeq*);
-
-SQLITE_PRIVATE void sqlite3VdbeRecordUnpack(KeyInfo*,int,const void*,UnpackedRecord*);
-SQLITE_PRIVATE int sqlite3VdbeRecordCompare(int,const void*,UnpackedRecord*);
-SQLITE_PRIVATE int sqlite3VdbeRecordCompareWithSkip(int, const void *, UnpackedRecord *, int);
-SQLITE_PRIVATE UnpackedRecord *sqlite3VdbeAllocUnpackedRecord(KeyInfo *, char *, int, char **);
-
-typedef int (*RecordCompare)(int,const void*,UnpackedRecord*);
-SQLITE_PRIVATE RecordCompare sqlite3VdbeFindCompare(UnpackedRecord*);
-
-#ifndef SQLITE_OMIT_TRIGGER
-SQLITE_PRIVATE void sqlite3VdbeLinkSubProgram(Vdbe *, SubProgram *);
-#endif
-
-/* Use SQLITE_ENABLE_COMMENTS to enable generation of extra comments on
-** each VDBE opcode.
-**
-** Use the SQLITE_ENABLE_MODULE_COMMENTS macro to see some extra no-op
-** comments in VDBE programs that show key decision points in the code
-** generator.
-*/
-#ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS
-SQLITE_PRIVATE void sqlite3VdbeComment(Vdbe*, const char*, ...);
-# define VdbeComment(X) sqlite3VdbeComment X
-SQLITE_PRIVATE void sqlite3VdbeNoopComment(Vdbe*, const char*, ...);
-# define VdbeNoopComment(X) sqlite3VdbeNoopComment X
-# ifdef SQLITE_ENABLE_MODULE_COMMENTS
-# define VdbeModuleComment(X) sqlite3VdbeNoopComment X
-# else
-# define VdbeModuleComment(X)
-# endif
-#else
-# define VdbeComment(X)
-# define VdbeNoopComment(X)
-# define VdbeModuleComment(X)
-#endif
-
-/*
-** The VdbeCoverage macros are used to set a coverage testing point
-** for VDBE branch instructions. The coverage testing points are line
-** numbers in the sqlite3.c source file. VDBE branch coverage testing
-** only works with an amalagmation build. That's ok since a VDBE branch
-** coverage build designed for testing the test suite only. No application
-** should ever ship with VDBE branch coverage measuring turned on.
-**
-** VdbeCoverage(v) // Mark the previously coded instruction
-** // as a branch
-**
-** VdbeCoverageIf(v, conditional) // Mark previous if conditional true
-**
-** VdbeCoverageAlwaysTaken(v) // Previous branch is always taken
-**
-** VdbeCoverageNeverTaken(v) // Previous branch is never taken
-**
-** Every VDBE branch operation must be tagged with one of the macros above.
-** If not, then when "make test" is run with -DSQLITE_VDBE_COVERAGE and
-** -DSQLITE_DEBUG then an ALWAYS() will fail in the vdbeTakeBranch()
-** routine in vdbe.c, alerting the developer to the missed tag.
-*/
-#ifdef SQLITE_VDBE_COVERAGE
-SQLITE_PRIVATE void sqlite3VdbeSetLineNumber(Vdbe*,int);
-# define VdbeCoverage(v) sqlite3VdbeSetLineNumber(v,__LINE__)
-# define VdbeCoverageIf(v,x) if(x)sqlite3VdbeSetLineNumber(v,__LINE__)
-# define VdbeCoverageAlwaysTaken(v) sqlite3VdbeSetLineNumber(v,2);
-# define VdbeCoverageNeverTaken(v) sqlite3VdbeSetLineNumber(v,1);
-# define VDBE_OFFSET_LINENO(x) (__LINE__+x)
-#else
-# define VdbeCoverage(v)
-# define VdbeCoverageIf(v,x)
-# define VdbeCoverageAlwaysTaken(v)
-# define VdbeCoverageNeverTaken(v)
-# define VDBE_OFFSET_LINENO(x) 0
-#endif
-
-#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
-SQLITE_PRIVATE void sqlite3VdbeScanStatus(Vdbe*, int, int, int, LogEst, const char*);
-#else
-# define sqlite3VdbeScanStatus(a,b,c,d,e)
-#endif
-
-#endif /* SQLITE_VDBE_H */
-
-/************** End of vdbe.h ************************************************/
-/************** Continuing where we left off in sqliteInt.h ******************/
-/************** Include pager.h in the middle of sqliteInt.h *****************/
-/************** Begin file pager.h *******************************************/
-/*
-** 2001 September 15
-**
-** The author disclaims copyright to this source code. In place of
-** a legal notice, here is a blessing:
-**
-** May you do good and not evil.
-** May you find forgiveness for yourself and forgive others.
-** May you share freely, never taking more than you give.
-**
-*************************************************************************
-** This header file defines the interface that the sqlite page cache
-** subsystem. The page cache subsystem reads and writes a file a page
-** at a time and provides a journal for rollback.
-*/
-
-#ifndef SQLITE_PAGER_H
-#define SQLITE_PAGER_H
-
-/*
-** Default maximum size for persistent journal files. A negative
-** value means no limit. This value may be overridden using the
-** sqlite3PagerJournalSizeLimit() API. See also "PRAGMA journal_size_limit".
-*/
-#ifndef SQLITE_DEFAULT_JOURNAL_SIZE_LIMIT
- #define SQLITE_DEFAULT_JOURNAL_SIZE_LIMIT -1
-#endif
-
-/*
-** The type used to represent a page number. The first page in a file
-** is called page 1. 0 is used to represent "not a page".
-*/
-typedef u32 Pgno;
-
-/*
-** Each open file is managed by a separate instance of the "Pager" structure.
-*/
-typedef struct Pager Pager;
-
-/*
-** Handle type for pages.
-*/
-typedef struct PgHdr DbPage;
-
-/*
-** Page number PAGER_MJ_PGNO is never used in an SQLite database (it is
-** reserved for working around a windows/posix incompatibility). It is
-** used in the journal to signify that the remainder of the journal file
-** is devoted to storing a master journal name - there are no more pages to
-** roll back. See comments for function writeMasterJournal() in pager.c
-** for details.
-*/
-#define PAGER_MJ_PGNO(x) ((Pgno)((PENDING_BYTE/((x)->pageSize))+1))
-
-/*
-** Allowed values for the flags parameter to sqlite3PagerOpen().
-**
-** NOTE: These values must match the corresponding BTREE_ values in btree.h.
-*/
-#define PAGER_OMIT_JOURNAL 0x0001 /* Do not use a rollback journal */
-#define PAGER_MEMORY 0x0002 /* In-memory database */
-
-/*
-** Valid values for the second argument to sqlite3PagerLockingMode().
-*/
-#define PAGER_LOCKINGMODE_QUERY -1
-#define PAGER_LOCKINGMODE_NORMAL 0
-#define PAGER_LOCKINGMODE_EXCLUSIVE 1
-
-/*
-** Numeric constants that encode the journalmode.
-**
-** The numeric values encoded here (other than PAGER_JOURNALMODE_QUERY)
-** are exposed in the API via the "PRAGMA journal_mode" command and
-** therefore cannot be changed without a compatibility break.
-*/
-#define PAGER_JOURNALMODE_QUERY (-1) /* Query the value of journalmode */
-#define PAGER_JOURNALMODE_DELETE 0 /* Commit by deleting journal file */
-#define PAGER_JOURNALMODE_PERSIST 1 /* Commit by zeroing journal header */
-#define PAGER_JOURNALMODE_OFF 2 /* Journal omitted. */
-#define PAGER_JOURNALMODE_TRUNCATE 3 /* Commit by truncating journal */
-#define PAGER_JOURNALMODE_MEMORY 4 /* In-memory journal file */
-#define PAGER_JOURNALMODE_WAL 5 /* Use write-ahead logging */
-
-/*
-** Flags that make up the mask passed to sqlite3PagerGet().
-*/
-#define PAGER_GET_NOCONTENT 0x01 /* Do not load data from disk */
-#define PAGER_GET_READONLY 0x02 /* Read-only page is acceptable */
-
-/*
-** Flags for sqlite3PagerSetFlags()
-**
-** Value constraints (enforced via assert()):
-** PAGER_FULLFSYNC == SQLITE_FullFSync
-** PAGER_CKPT_FULLFSYNC == SQLITE_CkptFullFSync
-** PAGER_CACHE_SPILL == SQLITE_CacheSpill
-*/
-#define PAGER_SYNCHRONOUS_OFF 0x01 /* PRAGMA synchronous=OFF */
-#define PAGER_SYNCHRONOUS_NORMAL 0x02 /* PRAGMA synchronous=NORMAL */
-#define PAGER_SYNCHRONOUS_FULL 0x03 /* PRAGMA synchronous=FULL */
-#define PAGER_SYNCHRONOUS_EXTRA 0x04 /* PRAGMA synchronous=EXTRA */
-#define PAGER_SYNCHRONOUS_MASK 0x07 /* Mask for four values above */
-#define PAGER_FULLFSYNC 0x08 /* PRAGMA fullfsync=ON */
-#define PAGER_CKPT_FULLFSYNC 0x10 /* PRAGMA checkpoint_fullfsync=ON */
-#define PAGER_CACHESPILL 0x20 /* PRAGMA cache_spill=ON */
-#define PAGER_FLAGS_MASK 0x38 /* All above except SYNCHRONOUS */
-
-/*
-** The remainder of this file contains the declarations of the functions
-** that make up the Pager sub-system API. See source code comments for
-** a detailed description of each routine.
-*/
-
-/* Open and close a Pager connection. */
-SQLITE_PRIVATE int sqlite3PagerOpen(
- sqlite3_vfs*,
- Pager **ppPager,
- const char*,
- int,
- int,
- int,
- void(*)(DbPage*)
-);
-SQLITE_PRIVATE int sqlite3PagerClose(Pager *pPager);
-SQLITE_PRIVATE int sqlite3PagerReadFileheader(Pager*, int, unsigned char*);
-
-/* Functions used to configure a Pager object. */
-SQLITE_PRIVATE void sqlite3PagerSetBusyhandler(Pager*, int(*)(void *), void *);
-SQLITE_PRIVATE int sqlite3PagerSetPagesize(Pager*, u32*, int);
-#ifdef SQLITE_HAS_CODEC
-SQLITE_PRIVATE void sqlite3PagerAlignReserve(Pager*,Pager*);
-#endif
-SQLITE_PRIVATE int sqlite3PagerMaxPageCount(Pager*, int);
-SQLITE_PRIVATE void sqlite3PagerSetCachesize(Pager*, int);
-SQLITE_PRIVATE int sqlite3PagerSetSpillsize(Pager*, int);
-SQLITE_PRIVATE void sqlite3PagerSetMmapLimit(Pager *, sqlite3_int64);
-SQLITE_PRIVATE void sqlite3PagerShrink(Pager*);
-SQLITE_PRIVATE void sqlite3PagerSetFlags(Pager*,unsigned);
-SQLITE_PRIVATE int sqlite3PagerLockingMode(Pager *, int);
-SQLITE_PRIVATE int sqlite3PagerSetJournalMode(Pager *, int);
-SQLITE_PRIVATE int sqlite3PagerGetJournalMode(Pager*);
-SQLITE_PRIVATE int sqlite3PagerOkToChangeJournalMode(Pager*);
-SQLITE_PRIVATE i64 sqlite3PagerJournalSizeLimit(Pager *, i64);
-SQLITE_PRIVATE sqlite3_backup **sqlite3PagerBackupPtr(Pager*);
-SQLITE_PRIVATE int sqlite3PagerFlush(Pager*);
-
-/* Functions used to obtain and release page references. */
-SQLITE_PRIVATE int sqlite3PagerGet(Pager *pPager, Pgno pgno, DbPage **ppPage, int clrFlag);
-SQLITE_PRIVATE DbPage *sqlite3PagerLookup(Pager *pPager, Pgno pgno);
-SQLITE_PRIVATE void sqlite3PagerRef(DbPage*);
-SQLITE_PRIVATE void sqlite3PagerUnref(DbPage*);
-SQLITE_PRIVATE void sqlite3PagerUnrefNotNull(DbPage*);
-
-/* Operations on page references. */
-SQLITE_PRIVATE int sqlite3PagerWrite(DbPage*);
-SQLITE_PRIVATE void sqlite3PagerDontWrite(DbPage*);
-SQLITE_PRIVATE int sqlite3PagerMovepage(Pager*,DbPage*,Pgno,int);
-SQLITE_PRIVATE int sqlite3PagerPageRefcount(DbPage*);
-SQLITE_PRIVATE void *sqlite3PagerGetData(DbPage *);
-SQLITE_PRIVATE void *sqlite3PagerGetExtra(DbPage *);
-
-/* Functions used to manage pager transactions and savepoints. */
-SQLITE_PRIVATE void sqlite3PagerPagecount(Pager*, int*);
-SQLITE_PRIVATE int sqlite3PagerBegin(Pager*, int exFlag, int);
-SQLITE_PRIVATE int sqlite3PagerCommitPhaseOne(Pager*,const char *zMaster, int);
-SQLITE_PRIVATE int sqlite3PagerExclusiveLock(Pager*);
-SQLITE_PRIVATE int sqlite3PagerSync(Pager *pPager, const char *zMaster);
-SQLITE_PRIVATE int sqlite3PagerCommitPhaseTwo(Pager*);
-SQLITE_PRIVATE int sqlite3PagerRollback(Pager*);
-SQLITE_PRIVATE int sqlite3PagerOpenSavepoint(Pager *pPager, int n);
-SQLITE_PRIVATE int sqlite3PagerSavepoint(Pager *pPager, int op, int iSavepoint);
-SQLITE_PRIVATE int sqlite3PagerSharedLock(Pager *pPager);
-
-#ifndef SQLITE_OMIT_WAL
-SQLITE_PRIVATE int sqlite3PagerCheckpoint(Pager *pPager, int, int*, int*);
-SQLITE_PRIVATE int sqlite3PagerWalSupported(Pager *pPager);
-SQLITE_PRIVATE int sqlite3PagerWalCallback(Pager *pPager);
-SQLITE_PRIVATE int sqlite3PagerOpenWal(Pager *pPager, int *pisOpen);
-SQLITE_PRIVATE int sqlite3PagerCloseWal(Pager *pPager);
-# ifdef SQLITE_ENABLE_SNAPSHOT
-SQLITE_PRIVATE int sqlite3PagerSnapshotGet(Pager *pPager, sqlite3_snapshot **ppSnapshot);
-SQLITE_PRIVATE int sqlite3PagerSnapshotOpen(Pager *pPager, sqlite3_snapshot *pSnapshot);
-# endif
-#endif
-
-#ifdef SQLITE_ENABLE_ZIPVFS
-SQLITE_PRIVATE int sqlite3PagerWalFramesize(Pager *pPager);
-#endif
-
-/* Functions used to query pager state and configuration. */
-SQLITE_PRIVATE u8 sqlite3PagerIsreadonly(Pager*);
-SQLITE_PRIVATE u32 sqlite3PagerDataVersion(Pager*);
-#ifdef SQLITE_DEBUG
-SQLITE_PRIVATE int sqlite3PagerRefcount(Pager*);
-#endif
-SQLITE_PRIVATE int sqlite3PagerMemUsed(Pager*);
-SQLITE_PRIVATE const char *sqlite3PagerFilename(Pager*, int);
-SQLITE_PRIVATE sqlite3_vfs *sqlite3PagerVfs(Pager*);
-SQLITE_PRIVATE sqlite3_file *sqlite3PagerFile(Pager*);
-SQLITE_PRIVATE sqlite3_file *sqlite3PagerJrnlFile(Pager*);
-SQLITE_PRIVATE const char *sqlite3PagerJournalname(Pager*);
-SQLITE_PRIVATE void *sqlite3PagerTempSpace(Pager*);
-SQLITE_PRIVATE int sqlite3PagerIsMemdb(Pager*);
-SQLITE_PRIVATE void sqlite3PagerCacheStat(Pager *, int, int, int *);
-SQLITE_PRIVATE void sqlite3PagerClearCache(Pager*);
-SQLITE_PRIVATE int sqlite3SectorSize(sqlite3_file *);
-
-/* Functions used to truncate the database file. */
-SQLITE_PRIVATE void sqlite3PagerTruncateImage(Pager*,Pgno);
-
-SQLITE_PRIVATE void sqlite3PagerRekey(DbPage*, Pgno, u16);
-
-#if defined(SQLITE_HAS_CODEC) && !defined(SQLITE_OMIT_WAL)
-SQLITE_PRIVATE void *sqlite3PagerCodec(DbPage *);
-#endif
-
-/* Functions to support testing and debugging. */
-#if !defined(NDEBUG) || defined(SQLITE_TEST)
-SQLITE_PRIVATE Pgno sqlite3PagerPagenumber(DbPage*);
-SQLITE_PRIVATE int sqlite3PagerIswriteable(DbPage*);
-#endif
-#ifdef SQLITE_TEST
-SQLITE_PRIVATE int *sqlite3PagerStats(Pager*);
-SQLITE_PRIVATE void sqlite3PagerRefdump(Pager*);
- void disable_simulated_io_errors(void);
- void enable_simulated_io_errors(void);
-#else
-# define disable_simulated_io_errors()
-# define enable_simulated_io_errors()
-#endif
-
-#endif /* SQLITE_PAGER_H */
-
-/************** End of pager.h ***********************************************/
-/************** Continuing where we left off in sqliteInt.h ******************/
-/************** Include pcache.h in the middle of sqliteInt.h ****************/
-/************** Begin file pcache.h ******************************************/
-/*
-** 2008 August 05
-**
-** The author disclaims copyright to this source code. In place of
-** a legal notice, here is a blessing:
-**
-** May you do good and not evil.
-** May you find forgiveness for yourself and forgive others.
-** May you share freely, never taking more than you give.
-**
-*************************************************************************
-** This header file defines the interface that the sqlite page cache
-** subsystem.
-*/
-
-#ifndef _PCACHE_H_
-
-typedef struct PgHdr PgHdr;
-typedef struct PCache PCache;
-
-/*
-** Every page in the cache is controlled by an instance of the following
-** structure.
-*/
-struct PgHdr {
- sqlite3_pcache_page *pPage; /* Pcache object page handle */
- void *pData; /* Page data */
- void *pExtra; /* Extra content */
- PgHdr *pDirty; /* Transient list of dirty sorted by pgno */
- Pager *pPager; /* The pager this page is part of */
- Pgno pgno; /* Page number for this page */
-#ifdef SQLITE_CHECK_PAGES
- u32 pageHash; /* Hash of page content */
-#endif
- u16 flags; /* PGHDR flags defined below */
-
- /**********************************************************************
- ** Elements above are public. All that follows is private to pcache.c
- ** and should not be accessed by other modules.
- */
- i16 nRef; /* Number of users of this page */
- PCache *pCache; /* Cache that owns this page */
-
- PgHdr *pDirtyNext; /* Next element in list of dirty pages */
- PgHdr *pDirtyPrev; /* Previous element in list of dirty pages */
-};
-
-/* Bit values for PgHdr.flags */
-#define PGHDR_CLEAN 0x001 /* Page not on the PCache.pDirty list */
-#define PGHDR_DIRTY 0x002 /* Page is on the PCache.pDirty list */
-#define PGHDR_WRITEABLE 0x004 /* Journaled and ready to modify */
-#define PGHDR_NEED_SYNC 0x008 /* Fsync the rollback journal before
- ** writing this page to the database */
-#define PGHDR_DONT_WRITE 0x010 /* Do not write content to disk */
-#define PGHDR_MMAP 0x020 /* This is an mmap page object */
-
-#define PGHDR_WAL_APPEND 0x040 /* Appended to wal file */
-
-/* Initialize and shutdown the page cache subsystem */
-SQLITE_PRIVATE int sqlite3PcacheInitialize(void);
-SQLITE_PRIVATE void sqlite3PcacheShutdown(void);
-
-/* Page cache buffer management:
-** These routines implement SQLITE_CONFIG_PAGECACHE.
-*/
-SQLITE_PRIVATE void sqlite3PCacheBufferSetup(void *, int sz, int n);
-
-/* Create a new pager cache.
-** Under memory stress, invoke xStress to try to make pages clean.
-** Only clean and unpinned pages can be reclaimed.
-*/
-SQLITE_PRIVATE int sqlite3PcacheOpen(
- int szPage, /* Size of every page */
- int szExtra, /* Extra space associated with each page */
- int bPurgeable, /* True if pages are on backing store */
- int (*xStress)(void*, PgHdr*), /* Call to try to make pages clean */
- void *pStress, /* Argument to xStress */
- PCache *pToInit /* Preallocated space for the PCache */
-);
-
-/* Modify the page-size after the cache has been created. */
-SQLITE_PRIVATE int sqlite3PcacheSetPageSize(PCache *, int);
-
-/* Return the size in bytes of a PCache object. Used to preallocate
-** storage space.
-*/
-SQLITE_PRIVATE int sqlite3PcacheSize(void);
-
-/* One release per successful fetch. Page is pinned until released.
-** Reference counted.
-*/
-SQLITE_PRIVATE sqlite3_pcache_page *sqlite3PcacheFetch(PCache*, Pgno, int createFlag);
-SQLITE_PRIVATE int sqlite3PcacheFetchStress(PCache*, Pgno, sqlite3_pcache_page**);
-SQLITE_PRIVATE PgHdr *sqlite3PcacheFetchFinish(PCache*, Pgno, sqlite3_pcache_page *pPage);
-SQLITE_PRIVATE void sqlite3PcacheRelease(PgHdr*);
-
-SQLITE_PRIVATE void sqlite3PcacheDrop(PgHdr*); /* Remove page from cache */
-SQLITE_PRIVATE void sqlite3PcacheMakeDirty(PgHdr*); /* Make sure page is marked dirty */
-SQLITE_PRIVATE void sqlite3PcacheMakeClean(PgHdr*); /* Mark a single page as clean */
-SQLITE_PRIVATE void sqlite3PcacheCleanAll(PCache*); /* Mark all dirty list pages as clean */
-SQLITE_PRIVATE void sqlite3PcacheClearWritable(PCache*);
-
-/* Change a page number. Used by incr-vacuum. */
-SQLITE_PRIVATE void sqlite3PcacheMove(PgHdr*, Pgno);
-
-/* Remove all pages with pgno>x. Reset the cache if x==0 */
-SQLITE_PRIVATE void sqlite3PcacheTruncate(PCache*, Pgno x);
-
-/* Get a list of all dirty pages in the cache, sorted by page number */
-SQLITE_PRIVATE PgHdr *sqlite3PcacheDirtyList(PCache*);
-
-/* Reset and close the cache object */
-SQLITE_PRIVATE void sqlite3PcacheClose(PCache*);
-
-/* Clear flags from pages of the page cache */
-SQLITE_PRIVATE void sqlite3PcacheClearSyncFlags(PCache *);
-
-/* Discard the contents of the cache */
-SQLITE_PRIVATE void sqlite3PcacheClear(PCache*);
-
-/* Return the total number of outstanding page references */
-SQLITE_PRIVATE int sqlite3PcacheRefCount(PCache*);
-
-/* Increment the reference count of an existing page */
-SQLITE_PRIVATE void sqlite3PcacheRef(PgHdr*);
-
-SQLITE_PRIVATE int sqlite3PcachePageRefcount(PgHdr*);
-
-/* Return the total number of pages stored in the cache */
-SQLITE_PRIVATE int sqlite3PcachePagecount(PCache*);
-
-#if defined(SQLITE_CHECK_PAGES) || defined(SQLITE_DEBUG)
-/* Iterate through all dirty pages currently stored in the cache. This
-** interface is only available if SQLITE_CHECK_PAGES is defined when the
-** library is built.
-*/
-SQLITE_PRIVATE void sqlite3PcacheIterateDirty(PCache *pCache, void (*xIter)(PgHdr *));
-#endif
-
-#if defined(SQLITE_DEBUG)
-/* Check invariants on a PgHdr object */
-SQLITE_PRIVATE int sqlite3PcachePageSanity(PgHdr*);
-#endif
-
-/* Set and get the suggested cache-size for the specified pager-cache.
-**
-** If no global maximum is configured, then the system attempts to limit
-** the total number of pages cached by purgeable pager-caches to the sum
-** of the suggested cache-sizes.
-*/
-SQLITE_PRIVATE void sqlite3PcacheSetCachesize(PCache *, int);
-#ifdef SQLITE_TEST
-SQLITE_PRIVATE int sqlite3PcacheGetCachesize(PCache *);
-#endif
-
-/* Set or get the suggested spill-size for the specified pager-cache.
-**
-** The spill-size is the minimum number of pages in cache before the cache
-** will attempt to spill dirty pages by calling xStress.
-*/
-SQLITE_PRIVATE int sqlite3PcacheSetSpillsize(PCache *, int);
-
-/* Free up as much memory as possible from the page cache */
-SQLITE_PRIVATE void sqlite3PcacheShrink(PCache*);
-
-#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT
-/* Try to return memory used by the pcache module to the main memory heap */
-SQLITE_PRIVATE int sqlite3PcacheReleaseMemory(int);
-#endif
-
-#ifdef SQLITE_TEST
-SQLITE_PRIVATE void sqlite3PcacheStats(int*,int*,int*,int*);
-#endif
-
-SQLITE_PRIVATE void sqlite3PCacheSetDefault(void);
-
-/* Return the header size */
-SQLITE_PRIVATE int sqlite3HeaderSizePcache(void);
-SQLITE_PRIVATE int sqlite3HeaderSizePcache1(void);
-
-/* Number of dirty pages as a percentage of the configured cache size */
-SQLITE_PRIVATE int sqlite3PCachePercentDirty(PCache*);
-
-#endif /* _PCACHE_H_ */
-
-/************** End of pcache.h **********************************************/
-/************** Continuing where we left off in sqliteInt.h ******************/
-/************** Include os.h in the middle of sqliteInt.h ********************/
-/************** Begin file os.h **********************************************/
-/*
-** 2001 September 16
-**
-** The author disclaims copyright to this source code. In place of
-** a legal notice, here is a blessing:
-**
-** May you do good and not evil.
-** May you find forgiveness for yourself and forgive others.
-** May you share freely, never taking more than you give.
-**
-******************************************************************************
-**
-** This header file (together with is companion C source-code file
-** "os.c") attempt to abstract the underlying operating system so that
-** the SQLite library will work on both POSIX and windows systems.
-**
-** This header file is #include-ed by sqliteInt.h and thus ends up
-** being included by every source file.
-*/
-#ifndef _SQLITE_OS_H_
-#define _SQLITE_OS_H_
-
-/*
-** Attempt to automatically detect the operating system and setup the
-** necessary pre-processor macros for it.
-*/
-/************** Include os_setup.h in the middle of os.h *********************/
-/************** Begin file os_setup.h ****************************************/
-/*
-** 2013 November 25
-**
-** The author disclaims copyright to this source code. In place of
-** a legal notice, here is a blessing:
-**
-** May you do good and not evil.
-** May you find forgiveness for yourself and forgive others.
-** May you share freely, never taking more than you give.
-**
-******************************************************************************
-**
-** This file contains pre-processor directives related to operating system
-** detection and/or setup.
-*/
-#ifndef SQLITE_OS_SETUP_H
-#define SQLITE_OS_SETUP_H
-
-/*
-** Figure out if we are dealing with Unix, Windows, or some other operating
-** system.
-**
-** After the following block of preprocess macros, all of SQLITE_OS_UNIX,
-** SQLITE_OS_WIN, and SQLITE_OS_OTHER will defined to either 1 or 0. One of
-** the three will be 1. The other two will be 0.
-*/
-#if defined(SQLITE_OS_OTHER)
-# if SQLITE_OS_OTHER==1
-# undef SQLITE_OS_UNIX
-# define SQLITE_OS_UNIX 0
-# undef SQLITE_OS_WIN
-# define SQLITE_OS_WIN 0
-# else
-# undef SQLITE_OS_OTHER
-# endif
-#endif
-#if !defined(SQLITE_OS_UNIX) && !defined(SQLITE_OS_OTHER)
-# define SQLITE_OS_OTHER 0
-# ifndef SQLITE_OS_WIN
-# if defined(_WIN32) || defined(WIN32) || defined(__CYGWIN__) || \
- defined(__MINGW32__) || defined(__BORLANDC__)
-# define SQLITE_OS_WIN 1
-# define SQLITE_OS_UNIX 0
-# else
-# define SQLITE_OS_WIN 0
-# define SQLITE_OS_UNIX 1
-# endif
-# else
-# define SQLITE_OS_UNIX 0
-# endif
-#else
-# ifndef SQLITE_OS_WIN
-# define SQLITE_OS_WIN 0
-# endif
-#endif
-
-#endif /* SQLITE_OS_SETUP_H */
-
-/************** End of os_setup.h ********************************************/
-/************** Continuing where we left off in os.h *************************/
-
-/* If the SET_FULLSYNC macro is not defined above, then make it
-** a no-op
-*/
-#ifndef SET_FULLSYNC
-# define SET_FULLSYNC(x,y)
-#endif
-
-/*
-** The default size of a disk sector
-*/
-#ifndef SQLITE_DEFAULT_SECTOR_SIZE
-# define SQLITE_DEFAULT_SECTOR_SIZE 4096
-#endif
-
-/*
-** Temporary files are named starting with this prefix followed by 16 random
-** alphanumeric characters, and no file extension. They are stored in the
-** OS's standard temporary file directory, and are deleted prior to exit.
-** If sqlite is being embedded in another program, you may wish to change the
-** prefix to reflect your program's name, so that if your program exits
-** prematurely, old temporary files can be easily identified. This can be done
-** using -DSQLITE_TEMP_FILE_PREFIX=myprefix_ on the compiler command line.
-**
-** 2006-10-31: The default prefix used to be "sqlite_". But then
-** Mcafee started using SQLite in their anti-virus product and it
-** started putting files with the "sqlite" name in the c:/temp folder.
-** This annoyed many windows users. Those users would then do a
-** Google search for "sqlite", find the telephone numbers of the
-** developers and call to wake them up at night and complain.
-** For this reason, the default name prefix is changed to be "sqlite"
-** spelled backwards. So the temp files are still identified, but
-** anybody smart enough to figure out the code is also likely smart
-** enough to know that calling the developer will not help get rid
-** of the file.
-*/
-#ifndef SQLITE_TEMP_FILE_PREFIX
-# define SQLITE_TEMP_FILE_PREFIX "etilqs_"
-#endif
-
-/*
-** The following values may be passed as the second argument to
-** sqlite3OsLock(). The various locks exhibit the following semantics:
-**
-** SHARED: Any number of processes may hold a SHARED lock simultaneously.
-** RESERVED: A single process may hold a RESERVED lock on a file at
-** any time. Other processes may hold and obtain new SHARED locks.
-** PENDING: A single process may hold a PENDING lock on a file at
-** any one time. Existing SHARED locks may persist, but no new
-** SHARED locks may be obtained by other processes.
-** EXCLUSIVE: An EXCLUSIVE lock precludes all other locks.
-**
-** PENDING_LOCK may not be passed directly to sqlite3OsLock(). Instead, a
-** process that requests an EXCLUSIVE lock may actually obtain a PENDING
-** lock. This can be upgraded to an EXCLUSIVE lock by a subsequent call to
-** sqlite3OsLock().
-*/
-#define NO_LOCK 0
-#define SHARED_LOCK 1
-#define RESERVED_LOCK 2
-#define PENDING_LOCK 3
-#define EXCLUSIVE_LOCK 4
-
-/*
-** File Locking Notes: (Mostly about windows but also some info for Unix)
-**
-** We cannot use LockFileEx() or UnlockFileEx() on Win95/98/ME because
-** those functions are not available. So we use only LockFile() and
-** UnlockFile().
-**
-** LockFile() prevents not just writing but also reading by other processes.
-** A SHARED_LOCK is obtained by locking a single randomly-chosen
-** byte out of a specific range of bytes. The lock byte is obtained at
-** random so two separate readers can probably access the file at the
-** same time, unless they are unlucky and choose the same lock byte.
-** An EXCLUSIVE_LOCK is obtained by locking all bytes in the range.
-** There can only be one writer. A RESERVED_LOCK is obtained by locking
-** a single byte of the file that is designated as the reserved lock byte.
-** A PENDING_LOCK is obtained by locking a designated byte different from
-** the RESERVED_LOCK byte.
-**
-** On WinNT/2K/XP systems, LockFileEx() and UnlockFileEx() are available,
-** which means we can use reader/writer locks. When reader/writer locks
-** are used, the lock is placed on the same range of bytes that is used
-** for probabilistic locking in Win95/98/ME. Hence, the locking scheme
-** will support two or more Win95 readers or two or more WinNT readers.
-** But a single Win95 reader will lock out all WinNT readers and a single
-** WinNT reader will lock out all other Win95 readers.
-**
-** The following #defines specify the range of bytes used for locking.
-** SHARED_SIZE is the number of bytes available in the pool from which
-** a random byte is selected for a shared lock. The pool of bytes for
-** shared locks begins at SHARED_FIRST.
-**
-** The same locking strategy and
-** byte ranges are used for Unix. This leaves open the possibility of having
-** clients on win95, winNT, and unix all talking to the same shared file
-** and all locking correctly. To do so would require that samba (or whatever
-** tool is being used for file sharing) implements locks correctly between
-** windows and unix. I'm guessing that isn't likely to happen, but by
-** using the same locking range we are at least open to the possibility.
-**
-** Locking in windows is manditory. For this reason, we cannot store
-** actual data in the bytes used for locking. The pager never allocates
-** the pages involved in locking therefore. SHARED_SIZE is selected so
-** that all locks will fit on a single page even at the minimum page size.
-** PENDING_BYTE defines the beginning of the locks. By default PENDING_BYTE
-** is set high so that we don't have to allocate an unused page except
-** for very large databases. But one should test the page skipping logic
-** by setting PENDING_BYTE low and running the entire regression suite.
-**
-** Changing the value of PENDING_BYTE results in a subtly incompatible
-** file format. Depending on how it is changed, you might not notice
-** the incompatibility right away, even running a full regression test.
-** The default location of PENDING_BYTE is the first byte past the
-** 1GB boundary.
-**
-*/
-#ifdef SQLITE_OMIT_WSD
-# define PENDING_BYTE (0x40000000)
-#else
-# define PENDING_BYTE sqlite3PendingByte
-#endif
-#define RESERVED_BYTE (PENDING_BYTE+1)
-#define SHARED_FIRST (PENDING_BYTE+2)
-#define SHARED_SIZE 510
-
-/*
-** Wrapper around OS specific sqlite3_os_init() function.
-*/
-SQLITE_PRIVATE int sqlite3OsInit(void);
-
-/*
-** Functions for accessing sqlite3_file methods
-*/
-SQLITE_PRIVATE void sqlite3OsClose(sqlite3_file*);
-SQLITE_PRIVATE int sqlite3OsRead(sqlite3_file*, void*, int amt, i64 offset);
-SQLITE_PRIVATE int sqlite3OsWrite(sqlite3_file*, const void*, int amt, i64 offset);
-SQLITE_PRIVATE int sqlite3OsTruncate(sqlite3_file*, i64 size);
-SQLITE_PRIVATE int sqlite3OsSync(sqlite3_file*, int);
-SQLITE_PRIVATE int sqlite3OsFileSize(sqlite3_file*, i64 *pSize);
-SQLITE_PRIVATE int sqlite3OsLock(sqlite3_file*, int);
-SQLITE_PRIVATE int sqlite3OsUnlock(sqlite3_file*, int);
-SQLITE_PRIVATE int sqlite3OsCheckReservedLock(sqlite3_file *id, int *pResOut);
-SQLITE_PRIVATE int sqlite3OsFileControl(sqlite3_file*,int,void*);
-SQLITE_PRIVATE void sqlite3OsFileControlHint(sqlite3_file*,int,void*);
-#define SQLITE_FCNTL_DB_UNCHANGED 0xca093fa0
-SQLITE_PRIVATE int sqlite3OsSectorSize(sqlite3_file *id);
-SQLITE_PRIVATE int sqlite3OsDeviceCharacteristics(sqlite3_file *id);
-SQLITE_PRIVATE int sqlite3OsShmMap(sqlite3_file *,int,int,int,void volatile **);
-SQLITE_PRIVATE int sqlite3OsShmLock(sqlite3_file *id, int, int, int);
-SQLITE_PRIVATE void sqlite3OsShmBarrier(sqlite3_file *id);
-SQLITE_PRIVATE int sqlite3OsShmUnmap(sqlite3_file *id, int);
-SQLITE_PRIVATE int sqlite3OsFetch(sqlite3_file *id, i64, int, void **);
-SQLITE_PRIVATE int sqlite3OsUnfetch(sqlite3_file *, i64, void *);
-
-
-/*
-** Functions for accessing sqlite3_vfs methods
-*/
-SQLITE_PRIVATE int sqlite3OsOpen(sqlite3_vfs *, const char *, sqlite3_file*, int, int *);
-SQLITE_PRIVATE int sqlite3OsDelete(sqlite3_vfs *, const char *, int);
-SQLITE_PRIVATE int sqlite3OsAccess(sqlite3_vfs *, const char *, int, int *pResOut);
-SQLITE_PRIVATE int sqlite3OsFullPathname(sqlite3_vfs *, const char *, int, char *);
-#ifndef SQLITE_OMIT_LOAD_EXTENSION
-SQLITE_PRIVATE void *sqlite3OsDlOpen(sqlite3_vfs *, const char *);
-SQLITE_PRIVATE void sqlite3OsDlError(sqlite3_vfs *, int, char *);
-SQLITE_PRIVATE void (*sqlite3OsDlSym(sqlite3_vfs *, void *, const char *))(void);
-SQLITE_PRIVATE void sqlite3OsDlClose(sqlite3_vfs *, void *);
-#endif /* SQLITE_OMIT_LOAD_EXTENSION */
-SQLITE_PRIVATE int sqlite3OsRandomness(sqlite3_vfs *, int, char *);
-SQLITE_PRIVATE int sqlite3OsSleep(sqlite3_vfs *, int);
-SQLITE_PRIVATE int sqlite3OsGetLastError(sqlite3_vfs*);
-SQLITE_PRIVATE int sqlite3OsCurrentTimeInt64(sqlite3_vfs *, sqlite3_int64*);
-
-/*
-** Convenience functions for opening and closing files using
-** sqlite3_malloc() to obtain space for the file-handle structure.
-*/
-SQLITE_PRIVATE int sqlite3OsOpenMalloc(sqlite3_vfs *, const char *, sqlite3_file **, int,int*);
-SQLITE_PRIVATE void sqlite3OsCloseFree(sqlite3_file *);
-
-#endif /* _SQLITE_OS_H_ */
-
-/************** End of os.h **************************************************/
-/************** Continuing where we left off in sqliteInt.h ******************/
-/************** Include mutex.h in the middle of sqliteInt.h *****************/
-/************** Begin file mutex.h *******************************************/
-/*
-** 2007 August 28
-**
-** The author disclaims copyright to this source code. In place of
-** a legal notice, here is a blessing:
-**
-** May you do good and not evil.
-** May you find forgiveness for yourself and forgive others.
-** May you share freely, never taking more than you give.
-**
-*************************************************************************
-**
-** This file contains the common header for all mutex implementations.
-** The sqliteInt.h header #includes this file so that it is available
-** to all source files. We break it out in an effort to keep the code
-** better organized.
-**
-** NOTE: source files should *not* #include this header file directly.
-** Source files should #include the sqliteInt.h file and let that file
-** include this one indirectly.
-*/
-
-
-/*
-** Figure out what version of the code to use. The choices are
-**
-** SQLITE_MUTEX_OMIT No mutex logic. Not even stubs. The
-** mutexes implementation cannot be overridden
-** at start-time.
-**
-** SQLITE_MUTEX_NOOP For single-threaded applications. No
-** mutual exclusion is provided. But this
-** implementation can be overridden at
-** start-time.
-**
-** SQLITE_MUTEX_PTHREADS For multi-threaded applications on Unix.
-**
-** SQLITE_MUTEX_W32 For multi-threaded applications on Win32.
-*/
-#if !SQLITE_THREADSAFE
-# define SQLITE_MUTEX_OMIT
-#endif
-#if SQLITE_THREADSAFE && !defined(SQLITE_MUTEX_NOOP)
-# if SQLITE_OS_UNIX
-# define SQLITE_MUTEX_PTHREADS
-# elif SQLITE_OS_WIN
-# define SQLITE_MUTEX_W32
-# else
-# define SQLITE_MUTEX_NOOP
-# endif
-#endif
-
-#ifdef SQLITE_MUTEX_OMIT
-/*
-** If this is a no-op implementation, implement everything as macros.
-*/
-#define sqlite3_mutex_alloc(X) ((sqlite3_mutex*)8)
-#define sqlite3_mutex_free(X)
-#define sqlite3_mutex_enter(X)
-#define sqlite3_mutex_try(X) SQLITE_OK
-#define sqlite3_mutex_leave(X)
-#define sqlite3_mutex_held(X) ((void)(X),1)
-#define sqlite3_mutex_notheld(X) ((void)(X),1)
-#define sqlite3MutexAlloc(X) ((sqlite3_mutex*)8)
-#define sqlite3MutexInit() SQLITE_OK
-#define sqlite3MutexEnd()
-#define MUTEX_LOGIC(X)
-#else
-#define MUTEX_LOGIC(X) X
-#endif /* defined(SQLITE_MUTEX_OMIT) */
-
-/************** End of mutex.h ***********************************************/
-/************** Continuing where we left off in sqliteInt.h ******************/
-
-/* The SQLITE_EXTRA_DURABLE compile-time option used to set the default
-** synchronous setting to EXTRA. It is no longer supported.
-*/
-#ifdef SQLITE_EXTRA_DURABLE
-# warning Use SQLITE_DEFAULT_SYNCHRONOUS=3 instead of SQLITE_EXTRA_DURABLE
-# define SQLITE_DEFAULT_SYNCHRONOUS 3
-#endif
-
-/*
-** Default synchronous levels.
-**
-** Note that (for historcal reasons) the PAGER_SYNCHRONOUS_* macros differ
-** from the SQLITE_DEFAULT_SYNCHRONOUS value by 1.
-**
-** PAGER_SYNCHRONOUS DEFAULT_SYNCHRONOUS
-** OFF 1 0
-** NORMAL 2 1
-** FULL 3 2
-** EXTRA 4 3
-**
-** The "PRAGMA synchronous" statement also uses the zero-based numbers.
-** In other words, the zero-based numbers are used for all external interfaces
-** and the one-based values are used internally.
-*/
-#ifndef SQLITE_DEFAULT_SYNCHRONOUS
-# define SQLITE_DEFAULT_SYNCHRONOUS (PAGER_SYNCHRONOUS_FULL-1)
-#endif
-#ifndef SQLITE_DEFAULT_WAL_SYNCHRONOUS
-# define SQLITE_DEFAULT_WAL_SYNCHRONOUS SQLITE_DEFAULT_SYNCHRONOUS
-#endif
-
-/*
-** Each database file to be accessed by the system is an instance
-** of the following structure. There are normally two of these structures
-** in the sqlite.aDb[] array. aDb[0] is the main database file and
-** aDb[1] is the database file used to hold temporary tables. Additional
-** databases may be attached.
-*/
-struct Db {
- char *zName; /* Name of this database */
- Btree *pBt; /* The B*Tree structure for this database file */
- u8 safety_level; /* How aggressive at syncing data to disk */
- u8 bSyncSet; /* True if "PRAGMA synchronous=N" has been run */
- Schema *pSchema; /* Pointer to database schema (possibly shared) */
-};
-
-/*
-** An instance of the following structure stores a database schema.
-**
-** Most Schema objects are associated with a Btree. The exception is
-** the Schema for the TEMP databaes (sqlite3.aDb[1]) which is free-standing.
-** In shared cache mode, a single Schema object can be shared by multiple
-** Btrees that refer to the same underlying BtShared object.
-**
-** Schema objects are automatically deallocated when the last Btree that
-** references them is destroyed. The TEMP Schema is manually freed by
-** sqlite3_close().
-*
-** A thread must be holding a mutex on the corresponding Btree in order
-** to access Schema content. This implies that the thread must also be
-** holding a mutex on the sqlite3 connection pointer that owns the Btree.
-** For a TEMP Schema, only the connection mutex is required.
-*/
-struct Schema {
- int schema_cookie; /* Database schema version number for this file */
- int iGeneration; /* Generation counter. Incremented with each change */
- Hash tblHash; /* All tables indexed by name */
- Hash idxHash; /* All (named) indices indexed by name */
- Hash trigHash; /* All triggers indexed by name */
- Hash fkeyHash; /* All foreign keys by referenced table name */
- Table *pSeqTab; /* The sqlite_sequence table used by AUTOINCREMENT */
- u8 file_format; /* Schema format version for this file */
- u8 enc; /* Text encoding used by this database */
- u16 schemaFlags; /* Flags associated with this schema */
- int cache_size; /* Number of pages to use in the cache */
-};
-
-/*
-** These macros can be used to test, set, or clear bits in the
-** Db.pSchema->flags field.
-*/
-#define DbHasProperty(D,I,P) (((D)->aDb[I].pSchema->schemaFlags&(P))==(P))
-#define DbHasAnyProperty(D,I,P) (((D)->aDb[I].pSchema->schemaFlags&(P))!=0)
-#define DbSetProperty(D,I,P) (D)->aDb[I].pSchema->schemaFlags|=(P)
-#define DbClearProperty(D,I,P) (D)->aDb[I].pSchema->schemaFlags&=~(P)
-
-/*
-** Allowed values for the DB.pSchema->flags field.
-**
-** The DB_SchemaLoaded flag is set after the database schema has been
-** read into internal hash tables.
-**
-** DB_UnresetViews means that one or more views have column names that
-** have been filled out. If the schema changes, these column names might
-** changes and so the view will need to be reset.
-*/
-#define DB_SchemaLoaded 0x0001 /* The schema has been loaded */
-#define DB_UnresetViews 0x0002 /* Some views have defined column names */
-#define DB_Empty 0x0004 /* The file is empty (length 0 bytes) */
-
-/*
-** The number of different kinds of things that can be limited
-** using the sqlite3_limit() interface.
-*/
-#define SQLITE_N_LIMIT (SQLITE_LIMIT_WORKER_THREADS+1)
-
-/*
-** Lookaside malloc is a set of fixed-size buffers that can be used
-** to satisfy small transient memory allocation requests for objects
-** associated with a particular database connection. The use of
-** lookaside malloc provides a significant performance enhancement
-** (approx 10%) by avoiding numerous malloc/free requests while parsing
-** SQL statements.
-**
-** The Lookaside structure holds configuration information about the
-** lookaside malloc subsystem. Each available memory allocation in
-** the lookaside subsystem is stored on a linked list of LookasideSlot
-** objects.
-**
-** Lookaside allocations are only allowed for objects that are associated
-** with a particular database connection. Hence, schema information cannot
-** be stored in lookaside because in shared cache mode the schema information
-** is shared by multiple database connections. Therefore, while parsing
-** schema information, the Lookaside.bEnabled flag is cleared so that
-** lookaside allocations are not used to construct the schema objects.
-*/
-struct Lookaside {
- u32 bDisable; /* Only operate the lookaside when zero */
- u16 sz; /* Size of each buffer in bytes */
- u8 bMalloced; /* True if pStart obtained from sqlite3_malloc() */
- int nOut; /* Number of buffers currently checked out */
- int mxOut; /* Highwater mark for nOut */
- int anStat[3]; /* 0: hits. 1: size misses. 2: full misses */
- LookasideSlot *pFree; /* List of available buffers */
- void *pStart; /* First byte of available memory space */
- void *pEnd; /* First byte past end of available space */
-};
-struct LookasideSlot {
- LookasideSlot *pNext; /* Next buffer in the list of free buffers */
-};
-
-/*
-** A hash table for built-in function definitions. (Application-defined
-** functions use a regular table table from hash.h.)
-**
-** Hash each FuncDef structure into one of the FuncDefHash.a[] slots.
-** Collisions are on the FuncDef.u.pHash chain.
-*/
-#define SQLITE_FUNC_HASH_SZ 23
-struct FuncDefHash {
- FuncDef *a[SQLITE_FUNC_HASH_SZ]; /* Hash table for functions */
-};
-
-#ifdef SQLITE_USER_AUTHENTICATION
-/*
-** Information held in the "sqlite3" database connection object and used
-** to manage user authentication.
-*/
-typedef struct sqlite3_userauth sqlite3_userauth;
-struct sqlite3_userauth {
- u8 authLevel; /* Current authentication level */
- int nAuthPW; /* Size of the zAuthPW in bytes */
- char *zAuthPW; /* Password used to authenticate */
- char *zAuthUser; /* User name used to authenticate */
-};
-
-/* Allowed values for sqlite3_userauth.authLevel */
-#define UAUTH_Unknown 0 /* Authentication not yet checked */
-#define UAUTH_Fail 1 /* User authentication failed */
-#define UAUTH_User 2 /* Authenticated as a normal user */
-#define UAUTH_Admin 3 /* Authenticated as an administrator */
-
-/* Functions used only by user authorization logic */
-SQLITE_PRIVATE int sqlite3UserAuthTable(const char*);
-SQLITE_PRIVATE int sqlite3UserAuthCheckLogin(sqlite3*,const char*,u8*);
-SQLITE_PRIVATE void sqlite3UserAuthInit(sqlite3*);
-SQLITE_PRIVATE void sqlite3CryptFunc(sqlite3_context*,int,sqlite3_value**);
-
-#endif /* SQLITE_USER_AUTHENTICATION */
-
-/*
-** typedef for the authorization callback function.
-*/
-#ifdef SQLITE_USER_AUTHENTICATION
- typedef int (*sqlite3_xauth)(void*,int,const char*,const char*,const char*,
- const char*, const char*);
-#else
- typedef int (*sqlite3_xauth)(void*,int,const char*,const char*,const char*,
- const char*);
-#endif
-
-#ifndef SQLITE_OMIT_DEPRECATED
-/* This is an extra SQLITE_TRACE macro that indicates "legacy" tracing
-** in the style of sqlite3_trace()
-*/
-#define SQLITE_TRACE_LEGACY 0x80
-#else
-#define SQLITE_TRACE_LEGACY 0
-#endif /* SQLITE_OMIT_DEPRECATED */
-
-
-/*
-** Each database connection is an instance of the following structure.
-*/
-struct sqlite3 {
- sqlite3_vfs *pVfs; /* OS Interface */
- struct Vdbe *pVdbe; /* List of active virtual machines */
- CollSeq *pDfltColl; /* The default collating sequence (BINARY) */
- sqlite3_mutex *mutex; /* Connection mutex */
- Db *aDb; /* All backends */
- int nDb; /* Number of backends currently in use */
- int flags; /* Miscellaneous flags. See below */
- i64 lastRowid; /* ROWID of most recent insert (see above) */
- i64 szMmap; /* Default mmap_size setting */
- unsigned int openFlags; /* Flags passed to sqlite3_vfs.xOpen() */
- int errCode; /* Most recent error code (SQLITE_*) */
- int errMask; /* & result codes with this before returning */
- int iSysErrno; /* Errno value from last system error */
- u16 dbOptFlags; /* Flags to enable/disable optimizations */
- u8 enc; /* Text encoding */
- u8 autoCommit; /* The auto-commit flag. */
- u8 temp_store; /* 1: file 2: memory 0: default */
- u8 mallocFailed; /* True if we have seen a malloc failure */
- u8 bBenignMalloc; /* Do not require OOMs if true */
- u8 dfltLockMode; /* Default locking-mode for attached dbs */
- signed char nextAutovac; /* Autovac setting after VACUUM if >=0 */
- u8 suppressErr; /* Do not issue error messages if true */
- u8 vtabOnConflict; /* Value to return for s3_vtab_on_conflict() */
- u8 isTransactionSavepoint; /* True if the outermost savepoint is a TS */
- u8 mTrace; /* zero or more SQLITE_TRACE flags */
- int nextPagesize; /* Pagesize after VACUUM if >0 */
- u32 magic; /* Magic number for detect library misuse */
- int nChange; /* Value returned by sqlite3_changes() */
- int nTotalChange; /* Value returned by sqlite3_total_changes() */
- int aLimit[SQLITE_N_LIMIT]; /* Limits */
- int nMaxSorterMmap; /* Maximum size of regions mapped by sorter */
- struct sqlite3InitInfo { /* Information used during initialization */
- int newTnum; /* Rootpage of table being initialized */
- u8 iDb; /* Which db file is being initialized */
- u8 busy; /* TRUE if currently initializing */
- u8 orphanTrigger; /* Last statement is orphaned TEMP trigger */
- u8 imposterTable; /* Building an imposter table */
- } init;
- int nVdbeActive; /* Number of VDBEs currently running */
- int nVdbeRead; /* Number of active VDBEs that read or write */
- int nVdbeWrite; /* Number of active VDBEs that read and write */
- int nVdbeExec; /* Number of nested calls to VdbeExec() */
- int nVDestroy; /* Number of active OP_VDestroy operations */
- int nExtension; /* Number of loaded extensions */
- void **aExtension; /* Array of shared library handles */
- int (*xTrace)(u32,void*,void*,void*); /* Trace function */
- void *pTraceArg; /* Argument to the trace function */
- void (*xProfile)(void*,const char*,u64); /* Profiling function */
- void *pProfileArg; /* Argument to profile function */
- void *pCommitArg; /* Argument to xCommitCallback() */
- int (*xCommitCallback)(void*); /* Invoked at every commit. */
- void *pRollbackArg; /* Argument to xRollbackCallback() */
- void (*xRollbackCallback)(void*); /* Invoked at every commit. */
- void *pUpdateArg;
- void (*xUpdateCallback)(void*,int, const char*,const char*,sqlite_int64);
-#ifdef SQLITE_ENABLE_PREUPDATE_HOOK
- void *pPreUpdateArg; /* First argument to xPreUpdateCallback */
- void (*xPreUpdateCallback)( /* Registered using sqlite3_preupdate_hook() */
- void*,sqlite3*,int,char const*,char const*,sqlite3_int64,sqlite3_int64
- );
- PreUpdate *pPreUpdate; /* Context for active pre-update callback */
-#endif /* SQLITE_ENABLE_PREUPDATE_HOOK */
-#ifndef SQLITE_OMIT_WAL
- int (*xWalCallback)(void *, sqlite3 *, const char *, int);
- void *pWalArg;
-#endif
- void(*xCollNeeded)(void*,sqlite3*,int eTextRep,const char*);
- void(*xCollNeeded16)(void*,sqlite3*,int eTextRep,const void*);
- void *pCollNeededArg;
- sqlite3_value *pErr; /* Most recent error message */
- union {
- volatile int isInterrupted; /* True if sqlite3_interrupt has been called */
- double notUsed1; /* Spacer */
- } u1;
- Lookaside lookaside; /* Lookaside malloc configuration */
-#ifndef SQLITE_OMIT_AUTHORIZATION
- sqlite3_xauth xAuth; /* Access authorization function */
- void *pAuthArg; /* 1st argument to the access auth function */
-#endif
-#ifndef SQLITE_OMIT_PROGRESS_CALLBACK
- int (*xProgress)(void *); /* The progress callback */
- void *pProgressArg; /* Argument to the progress callback */
- unsigned nProgressOps; /* Number of opcodes for progress callback */
-#endif
-#ifndef SQLITE_OMIT_VIRTUALTABLE
- int nVTrans; /* Allocated size of aVTrans */
- Hash aModule; /* populated by sqlite3_create_module() */
- VtabCtx *pVtabCtx; /* Context for active vtab connect/create */
- VTable **aVTrans; /* Virtual tables with open transactions */
- VTable *pDisconnect; /* Disconnect these in next sqlite3_prepare() */
-#endif
- Hash aFunc; /* Hash table of connection functions */
- Hash aCollSeq; /* All collating sequences */
- BusyHandler busyHandler; /* Busy callback */
- Db aDbStatic[2]; /* Static space for the 2 default backends */
- Savepoint *pSavepoint; /* List of active savepoints */
- int busyTimeout; /* Busy handler timeout, in msec */
- int nSavepoint; /* Number of non-transaction savepoints */
- int nStatement; /* Number of nested statement-transactions */
- i64 nDeferredCons; /* Net deferred constraints this transaction. */
- i64 nDeferredImmCons; /* Net deferred immediate constraints */
- int *pnBytesFreed; /* If not NULL, increment this in DbFree() */
-#ifdef SQLITE_ENABLE_UNLOCK_NOTIFY
- /* The following variables are all protected by the STATIC_MASTER
- ** mutex, not by sqlite3.mutex. They are used by code in notify.c.
- **
- ** When X.pUnlockConnection==Y, that means that X is waiting for Y to
- ** unlock so that it can proceed.
- **
- ** When X.pBlockingConnection==Y, that means that something that X tried
- ** tried to do recently failed with an SQLITE_LOCKED error due to locks
- ** held by Y.
- */
- sqlite3 *pBlockingConnection; /* Connection that caused SQLITE_LOCKED */
- sqlite3 *pUnlockConnection; /* Connection to watch for unlock */
- void *pUnlockArg; /* Argument to xUnlockNotify */
- void (*xUnlockNotify)(void **, int); /* Unlock notify callback */
- sqlite3 *pNextBlocked; /* Next in list of all blocked connections */
-#endif
-#ifdef SQLITE_USER_AUTHENTICATION
- sqlite3_userauth auth; /* User authentication information */
-#endif
-};
-
-/*
-** A macro to discover the encoding of a database.
-*/
-#define SCHEMA_ENC(db) ((db)->aDb[0].pSchema->enc)
-#define ENC(db) ((db)->enc)
-
-/*
-** Possible values for the sqlite3.flags.
-**
-** Value constraints (enforced via assert()):
-** SQLITE_FullFSync == PAGER_FULLFSYNC
-** SQLITE_CkptFullFSync == PAGER_CKPT_FULLFSYNC
-** SQLITE_CacheSpill == PAGER_CACHE_SPILL
-*/
-#define SQLITE_VdbeTrace 0x00000001 /* True to trace VDBE execution */
-#define SQLITE_InternChanges 0x00000002 /* Uncommitted Hash table changes */
-#define SQLITE_FullColNames 0x00000004 /* Show full column names on SELECT */
-#define SQLITE_FullFSync 0x00000008 /* Use full fsync on the backend */
-#define SQLITE_CkptFullFSync 0x00000010 /* Use full fsync for checkpoint */
-#define SQLITE_CacheSpill 0x00000020 /* OK to spill pager cache */
-#define SQLITE_ShortColNames 0x00000040 /* Show short columns names */
-#define SQLITE_CountRows 0x00000080 /* Count rows changed by INSERT, */
- /* DELETE, or UPDATE and return */
- /* the count using a callback. */
-#define SQLITE_NullCallback 0x00000100 /* Invoke the callback once if the */
- /* result set is empty */
-#define SQLITE_SqlTrace 0x00000200 /* Debug print SQL as it executes */
-#define SQLITE_VdbeListing 0x00000400 /* Debug listings of VDBE programs */
-#define SQLITE_WriteSchema 0x00000800 /* OK to update SQLITE_MASTER */
-#define SQLITE_VdbeAddopTrace 0x00001000 /* Trace sqlite3VdbeAddOp() calls */
-#define SQLITE_IgnoreChecks 0x00002000 /* Do not enforce check constraints */
-#define SQLITE_ReadUncommitted 0x0004000 /* For shared-cache mode */
-#define SQLITE_LegacyFileFmt 0x00008000 /* Create new databases in format 1 */
-#define SQLITE_RecoveryMode 0x00010000 /* Ignore schema errors */
-#define SQLITE_ReverseOrder 0x00020000 /* Reverse unordered SELECTs */
-#define SQLITE_RecTriggers 0x00040000 /* Enable recursive triggers */
-#define SQLITE_ForeignKeys 0x00080000 /* Enforce foreign key constraints */
-#define SQLITE_AutoIndex 0x00100000 /* Enable automatic indexes */
-#define SQLITE_PreferBuiltin 0x00200000 /* Preference to built-in funcs */
-#define SQLITE_LoadExtension 0x00400000 /* Enable load_extension */
-#define SQLITE_LoadExtFunc 0x00800000 /* Enable load_extension() SQL func */
-#define SQLITE_EnableTrigger 0x01000000 /* True to enable triggers */
-#define SQLITE_DeferFKs 0x02000000 /* Defer all FK constraints */
-#define SQLITE_QueryOnly 0x04000000 /* Disable database changes */
-#define SQLITE_VdbeEQP 0x08000000 /* Debug EXPLAIN QUERY PLAN */
-#define SQLITE_Vacuum 0x10000000 /* Currently in a VACUUM */
-#define SQLITE_CellSizeCk 0x20000000 /* Check btree cell sizes on load */
-#define SQLITE_Fts3Tokenizer 0x40000000 /* Enable fts3_tokenizer(2) */
-
-
-/*
-** Bits of the sqlite3.dbOptFlags field that are used by the
-** sqlite3_test_control(SQLITE_TESTCTRL_OPTIMIZATIONS,...) interface to
-** selectively disable various optimizations.
-*/
-#define SQLITE_QueryFlattener 0x0001 /* Query flattening */
-#define SQLITE_ColumnCache 0x0002 /* Column cache */
-#define SQLITE_GroupByOrder 0x0004 /* GROUPBY cover of ORDERBY */
-#define SQLITE_FactorOutConst 0x0008 /* Constant factoring */
-/* not used 0x0010 // Was: SQLITE_IdxRealAsInt */
-#define SQLITE_DistinctOpt 0x0020 /* DISTINCT using indexes */
-#define SQLITE_CoverIdxScan 0x0040 /* Covering index scans */
-#define SQLITE_OrderByIdxJoin 0x0080 /* ORDER BY of joins via index */
-#define SQLITE_SubqCoroutine 0x0100 /* Evaluate subqueries as coroutines */
-#define SQLITE_Transitive 0x0200 /* Transitive constraints */
-#define SQLITE_OmitNoopJoin 0x0400 /* Omit unused tables in joins */
-#define SQLITE_Stat34 0x0800 /* Use STAT3 or STAT4 data */
-#define SQLITE_CursorHints 0x2000 /* Add OP_CursorHint opcodes */
-#define SQLITE_AllOpts 0xffff /* All optimizations */
-
-/*
-** Macros for testing whether or not optimizations are enabled or disabled.
-*/
-#ifndef SQLITE_OMIT_BUILTIN_TEST
-#define OptimizationDisabled(db, mask) (((db)->dbOptFlags&(mask))!=0)
-#define OptimizationEnabled(db, mask) (((db)->dbOptFlags&(mask))==0)
-#else
-#define OptimizationDisabled(db, mask) 0
-#define OptimizationEnabled(db, mask) 1
-#endif
-
-/*
-** Return true if it OK to factor constant expressions into the initialization
-** code. The argument is a Parse object for the code generator.
-*/
-#define ConstFactorOk(P) ((P)->okConstFactor)
-
-/*
-** Possible values for the sqlite.magic field.
-** The numbers are obtained at random and have no special meaning, other
-** than being distinct from one another.
-*/
-#define SQLITE_MAGIC_OPEN 0xa029a697 /* Database is open */
-#define SQLITE_MAGIC_CLOSED 0x9f3c2d33 /* Database is closed */
-#define SQLITE_MAGIC_SICK 0x4b771290 /* Error and awaiting close */
-#define SQLITE_MAGIC_BUSY 0xf03b7906 /* Database currently in use */
-#define SQLITE_MAGIC_ERROR 0xb5357930 /* An SQLITE_MISUSE error occurred */
-#define SQLITE_MAGIC_ZOMBIE 0x64cffc7f /* Close with last statement close */
-
-/*
-** Each SQL function is defined by an instance of the following
-** structure. For global built-in functions (ex: substr(), max(), count())
-** a pointer to this structure is held in the sqlite3BuiltinFunctions object.
-** For per-connection application-defined functions, a pointer to this
-** structure is held in the db->aHash hash table.
-**
-** The u.pHash field is used by the global built-ins. The u.pDestructor
-** field is used by per-connection app-def functions.
-*/
-struct FuncDef {
- i8 nArg; /* Number of arguments. -1 means unlimited */
- u16 funcFlags; /* Some combination of SQLITE_FUNC_* */
- void *pUserData; /* User data parameter */
- FuncDef *pNext; /* Next function with same name */
- void (*xSFunc)(sqlite3_context*,int,sqlite3_value**); /* func or agg-step */
- void (*xFinalize)(sqlite3_context*); /* Agg finalizer */
- const char *zName; /* SQL name of the function. */
- union {
- FuncDef *pHash; /* Next with a different name but the same hash */
- FuncDestructor *pDestructor; /* Reference counted destructor function */
- } u;
-};
-
-/*
-** This structure encapsulates a user-function destructor callback (as
-** configured using create_function_v2()) and a reference counter. When
-** create_function_v2() is called to create a function with a destructor,
-** a single object of this type is allocated. FuncDestructor.nRef is set to
-** the number of FuncDef objects created (either 1 or 3, depending on whether
-** or not the specified encoding is SQLITE_ANY). The FuncDef.pDestructor
-** member of each of the new FuncDef objects is set to point to the allocated
-** FuncDestructor.
-**
-** Thereafter, when one of the FuncDef objects is deleted, the reference
-** count on this object is decremented. When it reaches 0, the destructor
-** is invoked and the FuncDestructor structure freed.
-*/
-struct FuncDestructor {
- int nRef;
- void (*xDestroy)(void *);
- void *pUserData;
-};
-
-/*
-** Possible values for FuncDef.flags. Note that the _LENGTH and _TYPEOF
-** values must correspond to OPFLAG_LENGTHARG and OPFLAG_TYPEOFARG. And
-** SQLITE_FUNC_CONSTANT must be the same as SQLITE_DETERMINISTIC. There
-** are assert() statements in the code to verify this.
-**
-** Value constraints (enforced via assert()):
-** SQLITE_FUNC_MINMAX == NC_MinMaxAgg == SF_MinMaxAgg
-** SQLITE_FUNC_LENGTH == OPFLAG_LENGTHARG
-** SQLITE_FUNC_TYPEOF == OPFLAG_TYPEOFARG
-** SQLITE_FUNC_CONSTANT == SQLITE_DETERMINISTIC from the API
-** SQLITE_FUNC_ENCMASK depends on SQLITE_UTF* macros in the API
-*/
-#define SQLITE_FUNC_ENCMASK 0x0003 /* SQLITE_UTF8, SQLITE_UTF16BE or UTF16LE */
-#define SQLITE_FUNC_LIKE 0x0004 /* Candidate for the LIKE optimization */
-#define SQLITE_FUNC_CASE 0x0008 /* Case-sensitive LIKE-type function */
-#define SQLITE_FUNC_EPHEM 0x0010 /* Ephemeral. Delete with VDBE */
-#define SQLITE_FUNC_NEEDCOLL 0x0020 /* sqlite3GetFuncCollSeq() might be called*/
-#define SQLITE_FUNC_LENGTH 0x0040 /* Built-in length() function */
-#define SQLITE_FUNC_TYPEOF 0x0080 /* Built-in typeof() function */
-#define SQLITE_FUNC_COUNT 0x0100 /* Built-in count(*) aggregate */
-#define SQLITE_FUNC_COALESCE 0x0200 /* Built-in coalesce() or ifnull() */
-#define SQLITE_FUNC_UNLIKELY 0x0400 /* Built-in unlikely() function */
-#define SQLITE_FUNC_CONSTANT 0x0800 /* Constant inputs give a constant output */
-#define SQLITE_FUNC_MINMAX 0x1000 /* True for min() and max() aggregates */
-#define SQLITE_FUNC_SLOCHNG 0x2000 /* "Slow Change". Value constant during a
- ** single query - might change over time */
-
-/*
-** The following three macros, FUNCTION(), LIKEFUNC() and AGGREGATE() are
-** used to create the initializers for the FuncDef structures.
-**
-** FUNCTION(zName, nArg, iArg, bNC, xFunc)
-** Used to create a scalar function definition of a function zName
-** implemented by C function xFunc that accepts nArg arguments. The
-** value passed as iArg is cast to a (void*) and made available
-** as the user-data (sqlite3_user_data()) for the function. If
-** argument bNC is true, then the SQLITE_FUNC_NEEDCOLL flag is set.
-**
-** VFUNCTION(zName, nArg, iArg, bNC, xFunc)
-** Like FUNCTION except it omits the SQLITE_FUNC_CONSTANT flag.
-**
-** DFUNCTION(zName, nArg, iArg, bNC, xFunc)
-** Like FUNCTION except it omits the SQLITE_FUNC_CONSTANT flag and
-** adds the SQLITE_FUNC_SLOCHNG flag. Used for date & time functions
-** and functions like sqlite_version() that can change, but not during
-** a single query.
-**
-** AGGREGATE(zName, nArg, iArg, bNC, xStep, xFinal)
-** Used to create an aggregate function definition implemented by
-** the C functions xStep and xFinal. The first four parameters
-** are interpreted in the same way as the first 4 parameters to
-** FUNCTION().
-**
-** LIKEFUNC(zName, nArg, pArg, flags)
-** Used to create a scalar function definition of a function zName
-** that accepts nArg arguments and is implemented by a call to C
-** function likeFunc. Argument pArg is cast to a (void *) and made
-** available as the function user-data (sqlite3_user_data()). The
-** FuncDef.flags variable is set to the value passed as the flags
-** parameter.
-*/
-#define FUNCTION(zName, nArg, iArg, bNC, xFunc) \
- {nArg, SQLITE_FUNC_CONSTANT|SQLITE_UTF8|(bNC*SQLITE_FUNC_NEEDCOLL), \
- SQLITE_INT_TO_PTR(iArg), 0, xFunc, 0, #zName, {0} }
-#define VFUNCTION(zName, nArg, iArg, bNC, xFunc) \
- {nArg, SQLITE_UTF8|(bNC*SQLITE_FUNC_NEEDCOLL), \
- SQLITE_INT_TO_PTR(iArg), 0, xFunc, 0, #zName, {0} }
-#define DFUNCTION(zName, nArg, iArg, bNC, xFunc) \
- {nArg, SQLITE_FUNC_SLOCHNG|SQLITE_UTF8|(bNC*SQLITE_FUNC_NEEDCOLL), \
- SQLITE_INT_TO_PTR(iArg), 0, xFunc, 0, #zName, {0} }
-#define FUNCTION2(zName, nArg, iArg, bNC, xFunc, extraFlags) \
- {nArg,SQLITE_FUNC_CONSTANT|SQLITE_UTF8|(bNC*SQLITE_FUNC_NEEDCOLL)|extraFlags,\
- SQLITE_INT_TO_PTR(iArg), 0, xFunc, 0, #zName, {0} }
-#define STR_FUNCTION(zName, nArg, pArg, bNC, xFunc) \
- {nArg, SQLITE_FUNC_SLOCHNG|SQLITE_UTF8|(bNC*SQLITE_FUNC_NEEDCOLL), \
- pArg, 0, xFunc, 0, #zName, }
-#define LIKEFUNC(zName, nArg, arg, flags) \
- {nArg, SQLITE_FUNC_CONSTANT|SQLITE_UTF8|flags, \
- (void *)arg, 0, likeFunc, 0, #zName, {0} }
-#define AGGREGATE(zName, nArg, arg, nc, xStep, xFinal) \
- {nArg, SQLITE_UTF8|(nc*SQLITE_FUNC_NEEDCOLL), \
- SQLITE_INT_TO_PTR(arg), 0, xStep,xFinal,#zName, {0}}
-#define AGGREGATE2(zName, nArg, arg, nc, xStep, xFinal, extraFlags) \
- {nArg, SQLITE_UTF8|(nc*SQLITE_FUNC_NEEDCOLL)|extraFlags, \
- SQLITE_INT_TO_PTR(arg), 0, xStep,xFinal,#zName, {0}}
-
-/*
-** All current savepoints are stored in a linked list starting at
-** sqlite3.pSavepoint. The first element in the list is the most recently
-** opened savepoint. Savepoints are added to the list by the vdbe
-** OP_Savepoint instruction.
-*/
-struct Savepoint {
- char *zName; /* Savepoint name (nul-terminated) */
- i64 nDeferredCons; /* Number of deferred fk violations */
- i64 nDeferredImmCons; /* Number of deferred imm fk. */
- Savepoint *pNext; /* Parent savepoint (if any) */
-};
-
-/*
-** The following are used as the second parameter to sqlite3Savepoint(),
-** and as the P1 argument to the OP_Savepoint instruction.
-*/
-#define SAVEPOINT_BEGIN 0
-#define SAVEPOINT_RELEASE 1
-#define SAVEPOINT_ROLLBACK 2
-
-
-/*
-** Each SQLite module (virtual table definition) is defined by an
-** instance of the following structure, stored in the sqlite3.aModule
-** hash table.
-*/
-struct Module {
- const sqlite3_module *pModule; /* Callback pointers */
- const char *zName; /* Name passed to create_module() */
- void *pAux; /* pAux passed to create_module() */
- void (*xDestroy)(void *); /* Module destructor function */
- Table *pEpoTab; /* Eponymous table for this module */
-};
-
-/*
-** information about each column of an SQL table is held in an instance
-** of this structure.
-*/
-struct Column {
- char *zName; /* Name of this column, \000, then the type */
- Expr *pDflt; /* Default value of this column */
- char *zColl; /* Collating sequence. If NULL, use the default */
- u8 notNull; /* An OE_ code for handling a NOT NULL constraint */
- char affinity; /* One of the SQLITE_AFF_... values */
- u8 szEst; /* Estimated size of value in this column. sizeof(INT)==1 */
- u8 colFlags; /* Boolean properties. See COLFLAG_ defines below */
-};
-
-/* Allowed values for Column.colFlags:
-*/
-#define COLFLAG_PRIMKEY 0x0001 /* Column is part of the primary key */
-#define COLFLAG_HIDDEN 0x0002 /* A hidden column in a virtual table */
-#define COLFLAG_HASTYPE 0x0004 /* Type name follows column name */
-
-/*
-** A "Collating Sequence" is defined by an instance of the following
-** structure. Conceptually, a collating sequence consists of a name and
-** a comparison routine that defines the order of that sequence.
-**
-** If CollSeq.xCmp is NULL, it means that the
-** collating sequence is undefined. Indices built on an undefined
-** collating sequence may not be read or written.
-*/
-struct CollSeq {
- char *zName; /* Name of the collating sequence, UTF-8 encoded */
- u8 enc; /* Text encoding handled by xCmp() */
- void *pUser; /* First argument to xCmp() */
- int (*xCmp)(void*,int, const void*, int, const void*);
- void (*xDel)(void*); /* Destructor for pUser */
-};
-
-/*
-** A sort order can be either ASC or DESC.
-*/
-#define SQLITE_SO_ASC 0 /* Sort in ascending order */
-#define SQLITE_SO_DESC 1 /* Sort in ascending order */
-#define SQLITE_SO_UNDEFINED -1 /* No sort order specified */
-
-/*
-** Column affinity types.
-**
-** These used to have mnemonic name like 'i' for SQLITE_AFF_INTEGER and
-** 't' for SQLITE_AFF_TEXT. But we can save a little space and improve
-** the speed a little by numbering the values consecutively.
-**
-** But rather than start with 0 or 1, we begin with 'A'. That way,
-** when multiple affinity types are concatenated into a string and
-** used as the P4 operand, they will be more readable.
-**
-** Note also that the numeric types are grouped together so that testing
-** for a numeric type is a single comparison. And the BLOB type is first.
-*/
-#define SQLITE_AFF_BLOB 'A'
-#define SQLITE_AFF_TEXT 'B'
-#define SQLITE_AFF_NUMERIC 'C'
-#define SQLITE_AFF_INTEGER 'D'
-#define SQLITE_AFF_REAL 'E'
-
-#define sqlite3IsNumericAffinity(X) ((X)>=SQLITE_AFF_NUMERIC)
-
-/*
-** The SQLITE_AFF_MASK values masks off the significant bits of an
-** affinity value.
-*/
-#define SQLITE_AFF_MASK 0x47
-
-/*
-** Additional bit values that can be ORed with an affinity without
-** changing the affinity.
-**
-** The SQLITE_NOTNULL flag is a combination of NULLEQ and JUMPIFNULL.
-** It causes an assert() to fire if either operand to a comparison
-** operator is NULL. It is added to certain comparison operators to
-** prove that the operands are always NOT NULL.
-*/
-#define SQLITE_JUMPIFNULL 0x10 /* jumps if either operand is NULL */
-#define SQLITE_STOREP2 0x20 /* Store result in reg[P2] rather than jump */
-#define SQLITE_NULLEQ 0x80 /* NULL=NULL */
-#define SQLITE_NOTNULL 0x90 /* Assert that operands are never NULL */
-
-/*
-** An object of this type is created for each virtual table present in
-** the database schema.
-**
-** If the database schema is shared, then there is one instance of this
-** structure for each database connection (sqlite3*) that uses the shared
-** schema. This is because each database connection requires its own unique
-** instance of the sqlite3_vtab* handle used to access the virtual table
-** implementation. sqlite3_vtab* handles can not be shared between
-** database connections, even when the rest of the in-memory database
-** schema is shared, as the implementation often stores the database
-** connection handle passed to it via the xConnect() or xCreate() method
-** during initialization internally. This database connection handle may
-** then be used by the virtual table implementation to access real tables
-** within the database. So that they appear as part of the callers
-** transaction, these accesses need to be made via the same database
-** connection as that used to execute SQL operations on the virtual table.
-**
-** All VTable objects that correspond to a single table in a shared
-** database schema are initially stored in a linked-list pointed to by
-** the Table.pVTable member variable of the corresponding Table object.
-** When an sqlite3_prepare() operation is required to access the virtual
-** table, it searches the list for the VTable that corresponds to the
-** database connection doing the preparing so as to use the correct
-** sqlite3_vtab* handle in the compiled query.
-**
-** When an in-memory Table object is deleted (for example when the
-** schema is being reloaded for some reason), the VTable objects are not
-** deleted and the sqlite3_vtab* handles are not xDisconnect()ed
-** immediately. Instead, they are moved from the Table.pVTable list to
-** another linked list headed by the sqlite3.pDisconnect member of the
-** corresponding sqlite3 structure. They are then deleted/xDisconnected
-** next time a statement is prepared using said sqlite3*. This is done
-** to avoid deadlock issues involving multiple sqlite3.mutex mutexes.
-** Refer to comments above function sqlite3VtabUnlockList() for an
-** explanation as to why it is safe to add an entry to an sqlite3.pDisconnect
-** list without holding the corresponding sqlite3.mutex mutex.
-**
-** The memory for objects of this type is always allocated by
-** sqlite3DbMalloc(), using the connection handle stored in VTable.db as
-** the first argument.
-*/
-struct VTable {
- sqlite3 *db; /* Database connection associated with this table */
- Module *pMod; /* Pointer to module implementation */
- sqlite3_vtab *pVtab; /* Pointer to vtab instance */
- int nRef; /* Number of pointers to this structure */
- u8 bConstraint; /* True if constraints are supported */
- int iSavepoint; /* Depth of the SAVEPOINT stack */
- VTable *pNext; /* Next in linked list (see above) */
-};
-
-/*
-** The schema for each SQL table and view is represented in memory
-** by an instance of the following structure.
-*/
-struct Table {
- char *zName; /* Name of the table or view */
- Column *aCol; /* Information about each column */
- Index *pIndex; /* List of SQL indexes on this table. */
- Select *pSelect; /* NULL for tables. Points to definition if a view. */
- FKey *pFKey; /* Linked list of all foreign keys in this table */
- char *zColAff; /* String defining the affinity of each column */
- ExprList *pCheck; /* All CHECK constraints */
- /* ... also used as column name list in a VIEW */
- int tnum; /* Root BTree page for this table */
- i16 iPKey; /* If not negative, use aCol[iPKey] as the rowid */
- i16 nCol; /* Number of columns in this table */
- u16 nRef; /* Number of pointers to this Table */
- LogEst nRowLogEst; /* Estimated rows in table - from sqlite_stat1 table */
- LogEst szTabRow; /* Estimated size of each table row in bytes */
-#ifdef SQLITE_ENABLE_COSTMULT
- LogEst costMult; /* Cost multiplier for using this table */
-#endif
- u8 tabFlags; /* Mask of TF_* values */
- u8 keyConf; /* What to do in case of uniqueness conflict on iPKey */
-#ifndef SQLITE_OMIT_ALTERTABLE
- int addColOffset; /* Offset in CREATE TABLE stmt to add a new column */
-#endif
-#ifndef SQLITE_OMIT_VIRTUALTABLE
- int nModuleArg; /* Number of arguments to the module */
- char **azModuleArg; /* 0: module 1: schema 2: vtab name 3...: args */
- VTable *pVTable; /* List of VTable objects. */
-#endif
- Trigger *pTrigger; /* List of triggers stored in pSchema */
- Schema *pSchema; /* Schema that contains this table */
- Table *pNextZombie; /* Next on the Parse.pZombieTab list */
-};
-
-/*
-** Allowed values for Table.tabFlags.
-**
-** TF_OOOHidden applies to tables or view that have hidden columns that are
-** followed by non-hidden columns. Example: "CREATE VIRTUAL TABLE x USING
-** vtab1(a HIDDEN, b);". Since "b" is a non-hidden column but "a" is hidden,
-** the TF_OOOHidden attribute would apply in this case. Such tables require
-** special handling during INSERT processing.
-*/
-#define TF_Readonly 0x01 /* Read-only system table */
-#define TF_Ephemeral 0x02 /* An ephemeral table */
-#define TF_HasPrimaryKey 0x04 /* Table has a primary key */
-#define TF_Autoincrement 0x08 /* Integer primary key is autoincrement */
-#define TF_Virtual 0x10 /* Is a virtual table */
-#define TF_WithoutRowid 0x20 /* No rowid. PRIMARY KEY is the key */
-#define TF_NoVisibleRowid 0x40 /* No user-visible "rowid" column */
-#define TF_OOOHidden 0x80 /* Out-of-Order hidden columns */
-
-
-/*
-** Test to see whether or not a table is a virtual table. This is
-** done as a macro so that it will be optimized out when virtual
-** table support is omitted from the build.
-*/
-#ifndef SQLITE_OMIT_VIRTUALTABLE
-# define IsVirtual(X) (((X)->tabFlags & TF_Virtual)!=0)
-#else
-# define IsVirtual(X) 0
-#endif
-
-/*
-** Macros to determine if a column is hidden. IsOrdinaryHiddenColumn()
-** only works for non-virtual tables (ordinary tables and views) and is
-** always false unless SQLITE_ENABLE_HIDDEN_COLUMNS is defined. The
-** IsHiddenColumn() macro is general purpose.
-*/
-#if defined(SQLITE_ENABLE_HIDDEN_COLUMNS)
-# define IsHiddenColumn(X) (((X)->colFlags & COLFLAG_HIDDEN)!=0)
-# define IsOrdinaryHiddenColumn(X) (((X)->colFlags & COLFLAG_HIDDEN)!=0)
-#elif !defined(SQLITE_OMIT_VIRTUALTABLE)
-# define IsHiddenColumn(X) (((X)->colFlags & COLFLAG_HIDDEN)!=0)
-# define IsOrdinaryHiddenColumn(X) 0
-#else
-# define IsHiddenColumn(X) 0
-# define IsOrdinaryHiddenColumn(X) 0
-#endif
-
-
-/* Does the table have a rowid */
-#define HasRowid(X) (((X)->tabFlags & TF_WithoutRowid)==0)
-#define VisibleRowid(X) (((X)->tabFlags & TF_NoVisibleRowid)==0)
-
-/*
-** Each foreign key constraint is an instance of the following structure.
-**
-** A foreign key is associated with two tables. The "from" table is
-** the table that contains the REFERENCES clause that creates the foreign
-** key. The "to" table is the table that is named in the REFERENCES clause.
-** Consider this example:
-**
-** CREATE TABLE ex1(
-** a INTEGER PRIMARY KEY,
-** b INTEGER CONSTRAINT fk1 REFERENCES ex2(x)
-** );
-**
-** For foreign key "fk1", the from-table is "ex1" and the to-table is "ex2".
-** Equivalent names:
-**
-** from-table == child-table
-** to-table == parent-table
-**
-** Each REFERENCES clause generates an instance of the following structure
-** which is attached to the from-table. The to-table need not exist when
-** the from-table is created. The existence of the to-table is not checked.
-**
-** The list of all parents for child Table X is held at X.pFKey.
-**
-** A list of all children for a table named Z (which might not even exist)
-** is held in Schema.fkeyHash with a hash key of Z.
-*/
-struct FKey {
- Table *pFrom; /* Table containing the REFERENCES clause (aka: Child) */
- FKey *pNextFrom; /* Next FKey with the same in pFrom. Next parent of pFrom */
- char *zTo; /* Name of table that the key points to (aka: Parent) */
- FKey *pNextTo; /* Next with the same zTo. Next child of zTo. */
- FKey *pPrevTo; /* Previous with the same zTo */
- int nCol; /* Number of columns in this key */
- /* EV: R-30323-21917 */
- u8 isDeferred; /* True if constraint checking is deferred till COMMIT */
- u8 aAction[2]; /* ON DELETE and ON UPDATE actions, respectively */
- Trigger *apTrigger[2];/* Triggers for aAction[] actions */
- struct sColMap { /* Mapping of columns in pFrom to columns in zTo */
- int iFrom; /* Index of column in pFrom */
- char *zCol; /* Name of column in zTo. If NULL use PRIMARY KEY */
- } aCol[1]; /* One entry for each of nCol columns */
-};
-
-/*
-** SQLite supports many different ways to resolve a constraint
-** error. ROLLBACK processing means that a constraint violation
-** causes the operation in process to fail and for the current transaction
-** to be rolled back. ABORT processing means the operation in process
-** fails and any prior changes from that one operation are backed out,
-** but the transaction is not rolled back. FAIL processing means that
-** the operation in progress stops and returns an error code. But prior
-** changes due to the same operation are not backed out and no rollback
-** occurs. IGNORE means that the particular row that caused the constraint
-** error is not inserted or updated. Processing continues and no error
-** is returned. REPLACE means that preexisting database rows that caused
-** a UNIQUE constraint violation are removed so that the new insert or
-** update can proceed. Processing continues and no error is reported.
-**
-** RESTRICT, SETNULL, and CASCADE actions apply only to foreign keys.
-** RESTRICT is the same as ABORT for IMMEDIATE foreign keys and the
-** same as ROLLBACK for DEFERRED keys. SETNULL means that the foreign
-** key is set to NULL. CASCADE means that a DELETE or UPDATE of the
-** referenced table row is propagated into the row that holds the
-** foreign key.
-**
-** The following symbolic values are used to record which type
-** of action to take.
-*/
-#define OE_None 0 /* There is no constraint to check */
-#define OE_Rollback 1 /* Fail the operation and rollback the transaction */
-#define OE_Abort 2 /* Back out changes but do no rollback transaction */
-#define OE_Fail 3 /* Stop the operation but leave all prior changes */
-#define OE_Ignore 4 /* Ignore the error. Do not do the INSERT or UPDATE */
-#define OE_Replace 5 /* Delete existing record, then do INSERT or UPDATE */
-
-#define OE_Restrict 6 /* OE_Abort for IMMEDIATE, OE_Rollback for DEFERRED */
-#define OE_SetNull 7 /* Set the foreign key value to NULL */
-#define OE_SetDflt 8 /* Set the foreign key value to its default */
-#define OE_Cascade 9 /* Cascade the changes */
-
-#define OE_Default 10 /* Do whatever the default action is */
-
-
-/*
-** An instance of the following structure is passed as the first
-** argument to sqlite3VdbeKeyCompare and is used to control the
-** comparison of the two index keys.
-**
-** Note that aSortOrder[] and aColl[] have nField+1 slots. There
-** are nField slots for the columns of an index then one extra slot
-** for the rowid at the end.
-*/
-struct KeyInfo {
- u32 nRef; /* Number of references to this KeyInfo object */
- u8 enc; /* Text encoding - one of the SQLITE_UTF* values */
- u16 nField; /* Number of key columns in the index */
- u16 nXField; /* Number of columns beyond the key columns */
- sqlite3 *db; /* The database connection */
- u8 *aSortOrder; /* Sort order for each column. */
- CollSeq *aColl[1]; /* Collating sequence for each term of the key */
-};
-
-/*
-** This object holds a record which has been parsed out into individual
-** fields, for the purposes of doing a comparison.
-**
-** A record is an object that contains one or more fields of data.
-** Records are used to store the content of a table row and to store
-** the key of an index. A blob encoding of a record is created by
-** the OP_MakeRecord opcode of the VDBE and is disassembled by the
-** OP_Column opcode.
-**
-** An instance of this object serves as a "key" for doing a search on
-** an index b+tree. The goal of the search is to find the entry that
-** is closed to the key described by this object. This object might hold
-** just a prefix of the key. The number of fields is given by
-** pKeyInfo->nField.
-**
-** The r1 and r2 fields are the values to return if this key is less than
-** or greater than a key in the btree, respectively. These are normally
-** -1 and +1 respectively, but might be inverted to +1 and -1 if the b-tree
-** is in DESC order.
-**
-** The key comparison functions actually return default_rc when they find
-** an equals comparison. default_rc can be -1, 0, or +1. If there are
-** multiple entries in the b-tree with the same key (when only looking
-** at the first pKeyInfo->nFields,) then default_rc can be set to -1 to
-** cause the search to find the last match, or +1 to cause the search to
-** find the first match.
-**
-** The key comparison functions will set eqSeen to true if they ever
-** get and equal results when comparing this structure to a b-tree record.
-** When default_rc!=0, the search might end up on the record immediately
-** before the first match or immediately after the last match. The
-** eqSeen field will indicate whether or not an exact match exists in the
-** b-tree.
-*/
-struct UnpackedRecord {
- KeyInfo *pKeyInfo; /* Collation and sort-order information */
- Mem *aMem; /* Values */
- u16 nField; /* Number of entries in apMem[] */
- i8 default_rc; /* Comparison result if keys are equal */
- u8 errCode; /* Error detected by xRecordCompare (CORRUPT or NOMEM) */
- i8 r1; /* Value to return if (lhs > rhs) */
- i8 r2; /* Value to return if (rhs < lhs) */
- u8 eqSeen; /* True if an equality comparison has been seen */
-};
-
-
-/*
-** Each SQL index is represented in memory by an
-** instance of the following structure.
-**
-** The columns of the table that are to be indexed are described
-** by the aiColumn[] field of this structure. For example, suppose
-** we have the following table and index:
-**
-** CREATE TABLE Ex1(c1 int, c2 int, c3 text);
-** CREATE INDEX Ex2 ON Ex1(c3,c1);
-**
-** In the Table structure describing Ex1, nCol==3 because there are
-** three columns in the table. In the Index structure describing
-** Ex2, nColumn==2 since 2 of the 3 columns of Ex1 are indexed.
-** The value of aiColumn is {2, 0}. aiColumn[0]==2 because the
-** first column to be indexed (c3) has an index of 2 in Ex1.aCol[].
-** The second column to be indexed (c1) has an index of 0 in
-** Ex1.aCol[], hence Ex2.aiColumn[1]==0.
-**
-** The Index.onError field determines whether or not the indexed columns
-** must be unique and what to do if they are not. When Index.onError=OE_None,
-** it means this is not a unique index. Otherwise it is a unique index
-** and the value of Index.onError indicate the which conflict resolution
-** algorithm to employ whenever an attempt is made to insert a non-unique
-** element.
-**
-** While parsing a CREATE TABLE or CREATE INDEX statement in order to
-** generate VDBE code (as opposed to parsing one read from an sqlite_master
-** table as part of parsing an existing database schema), transient instances
-** of this structure may be created. In this case the Index.tnum variable is
-** used to store the address of a VDBE instruction, not a database page
-** number (it cannot - the database page is not allocated until the VDBE
-** program is executed). See convertToWithoutRowidTable() for details.
-*/
-struct Index {
- char *zName; /* Name of this index */
- i16 *aiColumn; /* Which columns are used by this index. 1st is 0 */
- LogEst *aiRowLogEst; /* From ANALYZE: Est. rows selected by each column */
- Table *pTable; /* The SQL table being indexed */
- char *zColAff; /* String defining the affinity of each column */
- Index *pNext; /* The next index associated with the same table */
- Schema *pSchema; /* Schema containing this index */
- u8 *aSortOrder; /* for each column: True==DESC, False==ASC */
- const char **azColl; /* Array of collation sequence names for index */
- Expr *pPartIdxWhere; /* WHERE clause for partial indices */
- ExprList *aColExpr; /* Column expressions */
- int tnum; /* DB Page containing root of this index */
- LogEst szIdxRow; /* Estimated average row size in bytes */
- u16 nKeyCol; /* Number of columns forming the key */
- u16 nColumn; /* Number of columns stored in the index */
- u8 onError; /* OE_Abort, OE_Ignore, OE_Replace, or OE_None */
- unsigned idxType:2; /* 1==UNIQUE, 2==PRIMARY KEY, 0==CREATE INDEX */
- unsigned bUnordered:1; /* Use this index for == or IN queries only */
- unsigned uniqNotNull:1; /* True if UNIQUE and NOT NULL for all columns */
- unsigned isResized:1; /* True if resizeIndexObject() has been called */
- unsigned isCovering:1; /* True if this is a covering index */
- unsigned noSkipScan:1; /* Do not try to use skip-scan if true */
-#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
- int nSample; /* Number of elements in aSample[] */
- int nSampleCol; /* Size of IndexSample.anEq[] and so on */
- tRowcnt *aAvgEq; /* Average nEq values for keys not in aSample */
- IndexSample *aSample; /* Samples of the left-most key */
- tRowcnt *aiRowEst; /* Non-logarithmic stat1 data for this index */
- tRowcnt nRowEst0; /* Non-logarithmic number of rows in the index */
-#endif
-};
-
-/*
-** Allowed values for Index.idxType
-*/
-#define SQLITE_IDXTYPE_APPDEF 0 /* Created using CREATE INDEX */
-#define SQLITE_IDXTYPE_UNIQUE 1 /* Implements a UNIQUE constraint */
-#define SQLITE_IDXTYPE_PRIMARYKEY 2 /* Is the PRIMARY KEY for the table */
-
-/* Return true if index X is a PRIMARY KEY index */
-#define IsPrimaryKeyIndex(X) ((X)->idxType==SQLITE_IDXTYPE_PRIMARYKEY)
-
-/* Return true if index X is a UNIQUE index */
-#define IsUniqueIndex(X) ((X)->onError!=OE_None)
-
-/* The Index.aiColumn[] values are normally positive integer. But
-** there are some negative values that have special meaning:
-*/
-#define XN_ROWID (-1) /* Indexed column is the rowid */
-#define XN_EXPR (-2) /* Indexed column is an expression */
-
-/*
-** Each sample stored in the sqlite_stat3 table is represented in memory
-** using a structure of this type. See documentation at the top of the
-** analyze.c source file for additional information.
-*/
-struct IndexSample {
- void *p; /* Pointer to sampled record */
- int n; /* Size of record in bytes */
- tRowcnt *anEq; /* Est. number of rows where the key equals this sample */
- tRowcnt *anLt; /* Est. number of rows where key is less than this sample */
- tRowcnt *anDLt; /* Est. number of distinct keys less than this sample */
-};
-
-/*
-** Each token coming out of the lexer is an instance of
-** this structure. Tokens are also used as part of an expression.
-**
-** Note if Token.z==0 then Token.dyn and Token.n are undefined and
-** may contain random values. Do not make any assumptions about Token.dyn
-** and Token.n when Token.z==0.
-*/
-struct Token {
- const char *z; /* Text of the token. Not NULL-terminated! */
- unsigned int n; /* Number of characters in this token */
-};
-
-/*
-** An instance of this structure contains information needed to generate
-** code for a SELECT that contains aggregate functions.
-**
-** If Expr.op==TK_AGG_COLUMN or TK_AGG_FUNCTION then Expr.pAggInfo is a
-** pointer to this structure. The Expr.iColumn field is the index in
-** AggInfo.aCol[] or AggInfo.aFunc[] of information needed to generate
-** code for that node.
-**
-** AggInfo.pGroupBy and AggInfo.aFunc.pExpr point to fields within the
-** original Select structure that describes the SELECT statement. These
-** fields do not need to be freed when deallocating the AggInfo structure.
-*/
-struct AggInfo {
- u8 directMode; /* Direct rendering mode means take data directly
- ** from source tables rather than from accumulators */
- u8 useSortingIdx; /* In direct mode, reference the sorting index rather
- ** than the source table */
- int sortingIdx; /* Cursor number of the sorting index */
- int sortingIdxPTab; /* Cursor number of pseudo-table */
- int nSortingColumn; /* Number of columns in the sorting index */
- int mnReg, mxReg; /* Range of registers allocated for aCol and aFunc */
- ExprList *pGroupBy; /* The group by clause */
- struct AggInfo_col { /* For each column used in source tables */
- Table *pTab; /* Source table */
- int iTable; /* Cursor number of the source table */
- int iColumn; /* Column number within the source table */
- int iSorterColumn; /* Column number in the sorting index */
- int iMem; /* Memory location that acts as accumulator */
- Expr *pExpr; /* The original expression */
- } *aCol;
- int nColumn; /* Number of used entries in aCol[] */
- int nAccumulator; /* Number of columns that show through to the output.
- ** Additional columns are used only as parameters to
- ** aggregate functions */
- struct AggInfo_func { /* For each aggregate function */
- Expr *pExpr; /* Expression encoding the function */
- FuncDef *pFunc; /* The aggregate function implementation */
- int iMem; /* Memory location that acts as accumulator */
- int iDistinct; /* Ephemeral table used to enforce DISTINCT */
- } *aFunc;
- int nFunc; /* Number of entries in aFunc[] */
-};
-
-/*
-** The datatype ynVar is a signed integer, either 16-bit or 32-bit.
-** Usually it is 16-bits. But if SQLITE_MAX_VARIABLE_NUMBER is greater
-** than 32767 we have to make it 32-bit. 16-bit is preferred because
-** it uses less memory in the Expr object, which is a big memory user
-** in systems with lots of prepared statements. And few applications
-** need more than about 10 or 20 variables. But some extreme users want
-** to have prepared statements with over 32767 variables, and for them
-** the option is available (at compile-time).
-*/
-#if SQLITE_MAX_VARIABLE_NUMBER<=32767
-typedef i16 ynVar;
-#else
-typedef int ynVar;
-#endif
-
-/*
-** Each node of an expression in the parse tree is an instance
-** of this structure.
-**
-** Expr.op is the opcode. The integer parser token codes are reused
-** as opcodes here. For example, the parser defines TK_GE to be an integer
-** code representing the ">=" operator. This same integer code is reused
-** to represent the greater-than-or-equal-to operator in the expression
-** tree.
-**
-** If the expression is an SQL literal (TK_INTEGER, TK_FLOAT, TK_BLOB,
-** or TK_STRING), then Expr.token contains the text of the SQL literal. If
-** the expression is a variable (TK_VARIABLE), then Expr.token contains the
-** variable name. Finally, if the expression is an SQL function (TK_FUNCTION),
-** then Expr.token contains the name of the function.
-**
-** Expr.pRight and Expr.pLeft are the left and right subexpressions of a
-** binary operator. Either or both may be NULL.
-**
-** Expr.x.pList is a list of arguments if the expression is an SQL function,
-** a CASE expression or an IN expression of the form " IN (, ...)".
-** Expr.x.pSelect is used if the expression is a sub-select or an expression of
-** the form " IN (SELECT ...)". If the EP_xIsSelect bit is set in the
-** Expr.flags mask, then Expr.x.pSelect is valid. Otherwise, Expr.x.pList is
-** valid.
-**
-** An expression of the form ID or ID.ID refers to a column in a table.
-** For such expressions, Expr.op is set to TK_COLUMN and Expr.iTable is
-** the integer cursor number of a VDBE cursor pointing to that table and
-** Expr.iColumn is the column number for the specific column. If the
-** expression is used as a result in an aggregate SELECT, then the
-** value is also stored in the Expr.iAgg column in the aggregate so that
-** it can be accessed after all aggregates are computed.
-**
-** If the expression is an unbound variable marker (a question mark
-** character '?' in the original SQL) then the Expr.iTable holds the index
-** number for that variable.
-**
-** If the expression is a subquery then Expr.iColumn holds an integer
-** register number containing the result of the subquery. If the
-** subquery gives a constant result, then iTable is -1. If the subquery
-** gives a different answer at different times during statement processing
-** then iTable is the address of a subroutine that computes the subquery.
-**
-** If the Expr is of type OP_Column, and the table it is selecting from
-** is a disk table or the "old.*" pseudo-table, then pTab points to the
-** corresponding table definition.
-**
-** ALLOCATION NOTES:
-**
-** Expr objects can use a lot of memory space in database schema. To
-** help reduce memory requirements, sometimes an Expr object will be
-** truncated. And to reduce the number of memory allocations, sometimes
-** two or more Expr objects will be stored in a single memory allocation,
-** together with Expr.zToken strings.
-**
-** If the EP_Reduced and EP_TokenOnly flags are set when
-** an Expr object is truncated. When EP_Reduced is set, then all
-** the child Expr objects in the Expr.pLeft and Expr.pRight subtrees
-** are contained within the same memory allocation. Note, however, that
-** the subtrees in Expr.x.pList or Expr.x.pSelect are always separately
-** allocated, regardless of whether or not EP_Reduced is set.
-*/
-struct Expr {
- u8 op; /* Operation performed by this node */
- char affinity; /* The affinity of the column or 0 if not a column */
- u32 flags; /* Various flags. EP_* See below */
- union {
- char *zToken; /* Token value. Zero terminated and dequoted */
- int iValue; /* Non-negative integer value if EP_IntValue */
- } u;
-
- /* If the EP_TokenOnly flag is set in the Expr.flags mask, then no
- ** space is allocated for the fields below this point. An attempt to
- ** access them will result in a segfault or malfunction.
- *********************************************************************/
-
- Expr *pLeft; /* Left subnode */
- Expr *pRight; /* Right subnode */
- union {
- ExprList *pList; /* op = IN, EXISTS, SELECT, CASE, FUNCTION, BETWEEN */
- Select *pSelect; /* EP_xIsSelect and op = IN, EXISTS, SELECT */
- } x;
-
- /* If the EP_Reduced flag is set in the Expr.flags mask, then no
- ** space is allocated for the fields below this point. An attempt to
- ** access them will result in a segfault or malfunction.
- *********************************************************************/
-
-#if SQLITE_MAX_EXPR_DEPTH>0
- int nHeight; /* Height of the tree headed by this node */
-#endif
- int iTable; /* TK_COLUMN: cursor number of table holding column
- ** TK_REGISTER: register number
- ** TK_TRIGGER: 1 -> new, 0 -> old
- ** EP_Unlikely: 134217728 times likelihood */
- ynVar iColumn; /* TK_COLUMN: column index. -1 for rowid.
- ** TK_VARIABLE: variable number (always >= 1). */
- i16 iAgg; /* Which entry in pAggInfo->aCol[] or ->aFunc[] */
- i16 iRightJoinTable; /* If EP_FromJoin, the right table of the join */
- u8 op2; /* TK_REGISTER: original value of Expr.op
- ** TK_COLUMN: the value of p5 for OP_Column
- ** TK_AGG_FUNCTION: nesting depth */
- AggInfo *pAggInfo; /* Used by TK_AGG_COLUMN and TK_AGG_FUNCTION */
- Table *pTab; /* Table for TK_COLUMN expressions. */
-};
-
-/*
-** The following are the meanings of bits in the Expr.flags field.
-*/
-#define EP_FromJoin 0x000001 /* Originates in ON/USING clause of outer join */
-#define EP_Agg 0x000002 /* Contains one or more aggregate functions */
-#define EP_Resolved 0x000004 /* IDs have been resolved to COLUMNs */
-#define EP_Error 0x000008 /* Expression contains one or more errors */
-#define EP_Distinct 0x000010 /* Aggregate function with DISTINCT keyword */
-#define EP_VarSelect 0x000020 /* pSelect is correlated, not constant */
-#define EP_DblQuoted 0x000040 /* token.z was originally in "..." */
-#define EP_InfixFunc 0x000080 /* True for an infix function: LIKE, GLOB, etc */
-#define EP_Collate 0x000100 /* Tree contains a TK_COLLATE operator */
-#define EP_Generic 0x000200 /* Ignore COLLATE or affinity on this tree */
-#define EP_IntValue 0x000400 /* Integer value contained in u.iValue */
-#define EP_xIsSelect 0x000800 /* x.pSelect is valid (otherwise x.pList is) */
-#define EP_Skip 0x001000 /* COLLATE, AS, or UNLIKELY */
-#define EP_Reduced 0x002000 /* Expr struct EXPR_REDUCEDSIZE bytes only */
-#define EP_TokenOnly 0x004000 /* Expr struct EXPR_TOKENONLYSIZE bytes only */
-#define EP_Static 0x008000 /* Held in memory not obtained from malloc() */
-#define EP_MemToken 0x010000 /* Need to sqlite3DbFree() Expr.zToken */
-#define EP_NoReduce 0x020000 /* Cannot EXPRDUP_REDUCE this Expr */
-#define EP_Unlikely 0x040000 /* unlikely() or likelihood() function */
-#define EP_ConstFunc 0x080000 /* A SQLITE_FUNC_CONSTANT or _SLOCHNG function */
-#define EP_CanBeNull 0x100000 /* Can be null despite NOT NULL constraint */
-#define EP_Subquery 0x200000 /* Tree contains a TK_SELECT operator */
-#define EP_Alias 0x400000 /* Is an alias for a result set column */
-
-/*
-** Combinations of two or more EP_* flags
-*/
-#define EP_Propagate (EP_Collate|EP_Subquery) /* Propagate these bits up tree */
-
-/*
-** These macros can be used to test, set, or clear bits in the
-** Expr.flags field.
-*/
-#define ExprHasProperty(E,P) (((E)->flags&(P))!=0)
-#define ExprHasAllProperty(E,P) (((E)->flags&(P))==(P))
-#define ExprSetProperty(E,P) (E)->flags|=(P)
-#define ExprClearProperty(E,P) (E)->flags&=~(P)
-
-/* The ExprSetVVAProperty() macro is used for Verification, Validation,
-** and Accreditation only. It works like ExprSetProperty() during VVA
-** processes but is a no-op for delivery.
-*/
-#ifdef SQLITE_DEBUG
-# define ExprSetVVAProperty(E,P) (E)->flags|=(P)
-#else
-# define ExprSetVVAProperty(E,P)
-#endif
-
-/*
-** Macros to determine the number of bytes required by a normal Expr
-** struct, an Expr struct with the EP_Reduced flag set in Expr.flags
-** and an Expr struct with the EP_TokenOnly flag set.
-*/
-#define EXPR_FULLSIZE sizeof(Expr) /* Full size */
-#define EXPR_REDUCEDSIZE offsetof(Expr,iTable) /* Common features */
-#define EXPR_TOKENONLYSIZE offsetof(Expr,pLeft) /* Fewer features */
-
-/*
-** Flags passed to the sqlite3ExprDup() function. See the header comment
-** above sqlite3ExprDup() for details.
-*/
-#define EXPRDUP_REDUCE 0x0001 /* Used reduced-size Expr nodes */
-
-/*
-** A list of expressions. Each expression may optionally have a
-** name. An expr/name combination can be used in several ways, such
-** as the list of "expr AS ID" fields following a "SELECT" or in the
-** list of "ID = expr" items in an UPDATE. A list of expressions can
-** also be used as the argument to a function, in which case the a.zName
-** field is not used.
-**
-** By default the Expr.zSpan field holds a human-readable description of
-** the expression that is used in the generation of error messages and
-** column labels. In this case, Expr.zSpan is typically the text of a
-** column expression as it exists in a SELECT statement. However, if
-** the bSpanIsTab flag is set, then zSpan is overloaded to mean the name
-** of the result column in the form: DATABASE.TABLE.COLUMN. This later
-** form is used for name resolution with nested FROM clauses.
-*/
-struct ExprList {
- int nExpr; /* Number of expressions on the list */
- struct ExprList_item { /* For each expression in the list */
- Expr *pExpr; /* The list of expressions */
- char *zName; /* Token associated with this expression */
- char *zSpan; /* Original text of the expression */
- u8 sortOrder; /* 1 for DESC or 0 for ASC */
- unsigned done :1; /* A flag to indicate when processing is finished */
- unsigned bSpanIsTab :1; /* zSpan holds DB.TABLE.COLUMN */
- unsigned reusable :1; /* Constant expression is reusable */
- union {
- struct {
- u16 iOrderByCol; /* For ORDER BY, column number in result set */
- u16 iAlias; /* Index into Parse.aAlias[] for zName */
- } x;
- int iConstExprReg; /* Register in which Expr value is cached */
- } u;
- } *a; /* Alloc a power of two greater or equal to nExpr */
-};
-
-/*
-** An instance of this structure is used by the parser to record both
-** the parse tree for an expression and the span of input text for an
-** expression.
-*/
-struct ExprSpan {
- Expr *pExpr; /* The expression parse tree */
- const char *zStart; /* First character of input text */
- const char *zEnd; /* One character past the end of input text */
-};
-
-/*
-** An instance of this structure can hold a simple list of identifiers,
-** such as the list "a,b,c" in the following statements:
-**
-** INSERT INTO t(a,b,c) VALUES ...;
-** CREATE INDEX idx ON t(a,b,c);
-** CREATE TRIGGER trig BEFORE UPDATE ON t(a,b,c) ...;
-**
-** The IdList.a.idx field is used when the IdList represents the list of
-** column names after a table name in an INSERT statement. In the statement
-**
-** INSERT INTO t(a,b,c) ...
-**
-** If "a" is the k-th column of table "t", then IdList.a[0].idx==k.
-*/
-struct IdList {
- struct IdList_item {
- char *zName; /* Name of the identifier */
- int idx; /* Index in some Table.aCol[] of a column named zName */
- } *a;
- int nId; /* Number of identifiers on the list */
-};
-
-/*
-** The bitmask datatype defined below is used for various optimizations.
-**
-** Changing this from a 64-bit to a 32-bit type limits the number of
-** tables in a join to 32 instead of 64. But it also reduces the size
-** of the library by 738 bytes on ix86.
-*/
-#ifdef SQLITE_BITMASK_TYPE
- typedef SQLITE_BITMASK_TYPE Bitmask;
-#else
- typedef u64 Bitmask;
-#endif
-
-/*
-** The number of bits in a Bitmask. "BMS" means "BitMask Size".
-*/
-#define BMS ((int)(sizeof(Bitmask)*8))
-
-/*
-** A bit in a Bitmask
-*/
-#define MASKBIT(n) (((Bitmask)1)<<(n))
-#define MASKBIT32(n) (((unsigned int)1)<<(n))
-#define ALLBITS ((Bitmask)-1)
-
-/*
-** The following structure describes the FROM clause of a SELECT statement.
-** Each table or subquery in the FROM clause is a separate element of
-** the SrcList.a[] array.
-**
-** With the addition of multiple database support, the following structure
-** can also be used to describe a particular table such as the table that
-** is modified by an INSERT, DELETE, or UPDATE statement. In standard SQL,
-** such a table must be a simple name: ID. But in SQLite, the table can
-** now be identified by a database name, a dot, then the table name: ID.ID.
-**
-** The jointype starts out showing the join type between the current table
-** and the next table on the list. The parser builds the list this way.
-** But sqlite3SrcListShiftJoinType() later shifts the jointypes so that each
-** jointype expresses the join between the table and the previous table.
-**
-** In the colUsed field, the high-order bit (bit 63) is set if the table
-** contains more than 63 columns and the 64-th or later column is used.
-*/
-struct SrcList {
- int nSrc; /* Number of tables or subqueries in the FROM clause */
- u32 nAlloc; /* Number of entries allocated in a[] below */
- struct SrcList_item {
- Schema *pSchema; /* Schema to which this item is fixed */
- char *zDatabase; /* Name of database holding this table */
- char *zName; /* Name of the table */
- char *zAlias; /* The "B" part of a "A AS B" phrase. zName is the "A" */
- Table *pTab; /* An SQL table corresponding to zName */
- Select *pSelect; /* A SELECT statement used in place of a table name */
- int addrFillSub; /* Address of subroutine to manifest a subquery */
- int regReturn; /* Register holding return address of addrFillSub */
- int regResult; /* Registers holding results of a co-routine */
- struct {
- u8 jointype; /* Type of join between this table and the previous */
- unsigned notIndexed :1; /* True if there is a NOT INDEXED clause */
- unsigned isIndexedBy :1; /* True if there is an INDEXED BY clause */
- unsigned isTabFunc :1; /* True if table-valued-function syntax */
- unsigned isCorrelated :1; /* True if sub-query is correlated */
- unsigned viaCoroutine :1; /* Implemented as a co-routine */
- unsigned isRecursive :1; /* True for recursive reference in WITH */
- } fg;
-#ifndef SQLITE_OMIT_EXPLAIN
- u8 iSelectId; /* If pSelect!=0, the id of the sub-select in EQP */
-#endif
- int iCursor; /* The VDBE cursor number used to access this table */
- Expr *pOn; /* The ON clause of a join */
- IdList *pUsing; /* The USING clause of a join */
- Bitmask colUsed; /* Bit N (1<" clause */
- ExprList *pFuncArg; /* Arguments to table-valued-function */
- } u1;
- Index *pIBIndex; /* Index structure corresponding to u1.zIndexedBy */
- } a[1]; /* One entry for each identifier on the list */
-};
-
-/*
-** Permitted values of the SrcList.a.jointype field
-*/
-#define JT_INNER 0x0001 /* Any kind of inner or cross join */
-#define JT_CROSS 0x0002 /* Explicit use of the CROSS keyword */
-#define JT_NATURAL 0x0004 /* True for a "natural" join */
-#define JT_LEFT 0x0008 /* Left outer join */
-#define JT_RIGHT 0x0010 /* Right outer join */
-#define JT_OUTER 0x0020 /* The "OUTER" keyword is present */
-#define JT_ERROR 0x0040 /* unknown or unsupported join type */
-
-
-/*
-** Flags appropriate for the wctrlFlags parameter of sqlite3WhereBegin()
-** and the WhereInfo.wctrlFlags member.
-**
-** Value constraints (enforced via assert()):
-** WHERE_USE_LIMIT == SF_FixedLimit
-*/
-#define WHERE_ORDERBY_NORMAL 0x0000 /* No-op */
-#define WHERE_ORDERBY_MIN 0x0001 /* ORDER BY processing for min() func */
-#define WHERE_ORDERBY_MAX 0x0002 /* ORDER BY processing for max() func */
-#define WHERE_ONEPASS_DESIRED 0x0004 /* Want to do one-pass UPDATE/DELETE */
-#define WHERE_ONEPASS_MULTIROW 0x0008 /* ONEPASS is ok with multiple rows */
-#define WHERE_DUPLICATES_OK 0x0010 /* Ok to return a row more than once */
-#define WHERE_OR_SUBCLAUSE 0x0020 /* Processing a sub-WHERE as part of
- ** the OR optimization */
-#define WHERE_GROUPBY 0x0040 /* pOrderBy is really a GROUP BY */
-#define WHERE_DISTINCTBY 0x0080 /* pOrderby is really a DISTINCT clause */
-#define WHERE_WANT_DISTINCT 0x0100 /* All output needs to be distinct */
-#define WHERE_SORTBYGROUP 0x0200 /* Support sqlite3WhereIsSorted() */
-#define WHERE_SEEK_TABLE 0x0400 /* Do not defer seeks on main table */
-#define WHERE_ORDERBY_LIMIT 0x0800 /* ORDERBY+LIMIT on the inner loop */
- /* 0x1000 not currently used */
- /* 0x2000 not currently used */
-#define WHERE_USE_LIMIT 0x4000 /* Use the LIMIT in cost estimates */
- /* 0x8000 not currently used */
-
-/* Allowed return values from sqlite3WhereIsDistinct()
-*/
-#define WHERE_DISTINCT_NOOP 0 /* DISTINCT keyword not used */
-#define WHERE_DISTINCT_UNIQUE 1 /* No duplicates */
-#define WHERE_DISTINCT_ORDERED 2 /* All duplicates are adjacent */
-#define WHERE_DISTINCT_UNORDERED 3 /* Duplicates are scattered */
-
-/*
-** A NameContext defines a context in which to resolve table and column
-** names. The context consists of a list of tables (the pSrcList) field and
-** a list of named expression (pEList). The named expression list may
-** be NULL. The pSrc corresponds to the FROM clause of a SELECT or
-** to the table being operated on by INSERT, UPDATE, or DELETE. The
-** pEList corresponds to the result set of a SELECT and is NULL for
-** other statements.
-**
-** NameContexts can be nested. When resolving names, the inner-most
-** context is searched first. If no match is found, the next outer
-** context is checked. If there is still no match, the next context
-** is checked. This process continues until either a match is found
-** or all contexts are check. When a match is found, the nRef member of
-** the context containing the match is incremented.
-**
-** Each subquery gets a new NameContext. The pNext field points to the
-** NameContext in the parent query. Thus the process of scanning the
-** NameContext list corresponds to searching through successively outer
-** subqueries looking for a match.
-*/
-struct NameContext {
- Parse *pParse; /* The parser */
- SrcList *pSrcList; /* One or more tables used to resolve names */
- ExprList *pEList; /* Optional list of result-set columns */
- AggInfo *pAggInfo; /* Information about aggregates at this level */
- NameContext *pNext; /* Next outer name context. NULL for outermost */
- int nRef; /* Number of names resolved by this context */
- int nErr; /* Number of errors encountered while resolving names */
- u16 ncFlags; /* Zero or more NC_* flags defined below */
-};
-
-/*
-** Allowed values for the NameContext, ncFlags field.
-**
-** Value constraints (all checked via assert()):
-** NC_HasAgg == SF_HasAgg
-** NC_MinMaxAgg == SF_MinMaxAgg == SQLITE_FUNC_MINMAX
-**
-*/
-#define NC_AllowAgg 0x0001 /* Aggregate functions are allowed here */
-#define NC_PartIdx 0x0002 /* True if resolving a partial index WHERE */
-#define NC_IsCheck 0x0004 /* True if resolving names in a CHECK constraint */
-#define NC_InAggFunc 0x0008 /* True if analyzing arguments to an agg func */
-#define NC_HasAgg 0x0010 /* One or more aggregate functions seen */
-#define NC_IdxExpr 0x0020 /* True if resolving columns of CREATE INDEX */
-#define NC_VarSelect 0x0040 /* A correlated subquery has been seen */
-#define NC_MinMaxAgg 0x1000 /* min/max aggregates seen. See note above */
-
-/*
-** An instance of the following structure contains all information
-** needed to generate code for a single SELECT statement.
-**
-** nLimit is set to -1 if there is no LIMIT clause. nOffset is set to 0.
-** If there is a LIMIT clause, the parser sets nLimit to the value of the
-** limit and nOffset to the value of the offset (or 0 if there is not
-** offset). But later on, nLimit and nOffset become the memory locations
-** in the VDBE that record the limit and offset counters.
-**
-** addrOpenEphm[] entries contain the address of OP_OpenEphemeral opcodes.
-** These addresses must be stored so that we can go back and fill in
-** the P4_KEYINFO and P2 parameters later. Neither the KeyInfo nor
-** the number of columns in P2 can be computed at the same time
-** as the OP_OpenEphm instruction is coded because not
-** enough information about the compound query is known at that point.
-** The KeyInfo for addrOpenTran[0] and [1] contains collating sequences
-** for the result set. The KeyInfo for addrOpenEphm[2] contains collating
-** sequences for the ORDER BY clause.
-*/
-struct Select {
- ExprList *pEList; /* The fields of the result */
- u8 op; /* One of: TK_UNION TK_ALL TK_INTERSECT TK_EXCEPT */
- LogEst nSelectRow; /* Estimated number of result rows */
- u32 selFlags; /* Various SF_* values */
- int iLimit, iOffset; /* Memory registers holding LIMIT & OFFSET counters */
-#if SELECTTRACE_ENABLED
- char zSelName[12]; /* Symbolic name of this SELECT use for debugging */
-#endif
- int addrOpenEphm[2]; /* OP_OpenEphem opcodes related to this select */
- SrcList *pSrc; /* The FROM clause */
- Expr *pWhere; /* The WHERE clause */
- ExprList *pGroupBy; /* The GROUP BY clause */
- Expr *pHaving; /* The HAVING clause */
- ExprList *pOrderBy; /* The ORDER BY clause */
- Select *pPrior; /* Prior select in a compound select statement */
- Select *pNext; /* Next select to the left in a compound */
- Expr *pLimit; /* LIMIT expression. NULL means not used. */
- Expr *pOffset; /* OFFSET expression. NULL means not used. */
- With *pWith; /* WITH clause attached to this select. Or NULL. */
-};
-
-/*
-** Allowed values for Select.selFlags. The "SF" prefix stands for
-** "Select Flag".
-**
-** Value constraints (all checked via assert())
-** SF_HasAgg == NC_HasAgg
-** SF_MinMaxAgg == NC_MinMaxAgg == SQLITE_FUNC_MINMAX
-** SF_FixedLimit == WHERE_USE_LIMIT
-*/
-#define SF_Distinct 0x00001 /* Output should be DISTINCT */
-#define SF_All 0x00002 /* Includes the ALL keyword */
-#define SF_Resolved 0x00004 /* Identifiers have been resolved */
-#define SF_Aggregate 0x00008 /* Contains agg functions or a GROUP BY */
-#define SF_HasAgg 0x00010 /* Contains aggregate functions */
-#define SF_UsesEphemeral 0x00020 /* Uses the OpenEphemeral opcode */
-#define SF_Expanded 0x00040 /* sqlite3SelectExpand() called on this */
-#define SF_HasTypeInfo 0x00080 /* FROM subqueries have Table metadata */
-#define SF_Compound 0x00100 /* Part of a compound query */
-#define SF_Values 0x00200 /* Synthesized from VALUES clause */
-#define SF_MultiValue 0x00400 /* Single VALUES term with multiple rows */
-#define SF_NestedFrom 0x00800 /* Part of a parenthesized FROM clause */
-#define SF_MinMaxAgg 0x01000 /* Aggregate containing min() or max() */
-#define SF_Recursive 0x02000 /* The recursive part of a recursive CTE */
-#define SF_FixedLimit 0x04000 /* nSelectRow set by a constant LIMIT */
-#define SF_MaybeConvert 0x08000 /* Need convertCompoundSelectToSubquery() */
-#define SF_Converted 0x10000 /* By convertCompoundSelectToSubquery() */
-#define SF_IncludeHidden 0x20000 /* Include hidden columns in output */
-
-
-/*
-** The results of a SELECT can be distributed in several ways, as defined
-** by one of the following macros. The "SRT" prefix means "SELECT Result
-** Type".
-**
-** SRT_Union Store results as a key in a temporary index
-** identified by pDest->iSDParm.
-**
-** SRT_Except Remove results from the temporary index pDest->iSDParm.
-**
-** SRT_Exists Store a 1 in memory cell pDest->iSDParm if the result
-** set is not empty.
-**
-** SRT_Discard Throw the results away. This is used by SELECT
-** statements within triggers whose only purpose is
-** the side-effects of functions.
-**
-** All of the above are free to ignore their ORDER BY clause. Those that
-** follow must honor the ORDER BY clause.
-**
-** SRT_Output Generate a row of output (using the OP_ResultRow
-** opcode) for each row in the result set.
-**
-** SRT_Mem Only valid if the result is a single column.
-** Store the first column of the first result row
-** in register pDest->iSDParm then abandon the rest
-** of the query. This destination implies "LIMIT 1".
-**
-** SRT_Set The result must be a single column. Store each
-** row of result as the key in table pDest->iSDParm.
-** Apply the affinity pDest->affSdst before storing
-** results. Used to implement "IN (SELECT ...)".
-**
-** SRT_EphemTab Create an temporary table pDest->iSDParm and store
-** the result there. The cursor is left open after
-** returning. This is like SRT_Table except that
-** this destination uses OP_OpenEphemeral to create
-** the table first.
-**
-** SRT_Coroutine Generate a co-routine that returns a new row of
-** results each time it is invoked. The entry point
-** of the co-routine is stored in register pDest->iSDParm
-** and the result row is stored in pDest->nDest registers
-** starting with pDest->iSdst.
-**
-** SRT_Table Store results in temporary table pDest->iSDParm.
-** SRT_Fifo This is like SRT_EphemTab except that the table
-** is assumed to already be open. SRT_Fifo has
-** the additional property of being able to ignore
-** the ORDER BY clause.
-**
-** SRT_DistFifo Store results in a temporary table pDest->iSDParm.
-** But also use temporary table pDest->iSDParm+1 as
-** a record of all prior results and ignore any duplicate
-** rows. Name means: "Distinct Fifo".
-**
-** SRT_Queue Store results in priority queue pDest->iSDParm (really
-** an index). Append a sequence number so that all entries
-** are distinct.
-**
-** SRT_DistQueue Store results in priority queue pDest->iSDParm only if
-** the same record has never been stored before. The
-** index at pDest->iSDParm+1 hold all prior stores.
-*/
-#define SRT_Union 1 /* Store result as keys in an index */
-#define SRT_Except 2 /* Remove result from a UNION index */
-#define SRT_Exists 3 /* Store 1 if the result is not empty */
-#define SRT_Discard 4 /* Do not save the results anywhere */
-#define SRT_Fifo 5 /* Store result as data with an automatic rowid */
-#define SRT_DistFifo 6 /* Like SRT_Fifo, but unique results only */
-#define SRT_Queue 7 /* Store result in an queue */
-#define SRT_DistQueue 8 /* Like SRT_Queue, but unique results only */
-
-/* The ORDER BY clause is ignored for all of the above */
-#define IgnorableOrderby(X) ((X->eDest)<=SRT_DistQueue)
-
-#define SRT_Output 9 /* Output each row of result */
-#define SRT_Mem 10 /* Store result in a memory cell */
-#define SRT_Set 11 /* Store results as keys in an index */
-#define SRT_EphemTab 12 /* Create transient tab and store like SRT_Table */
-#define SRT_Coroutine 13 /* Generate a single row of result */
-#define SRT_Table 14 /* Store result as data with an automatic rowid */
-
-/*
-** An instance of this object describes where to put of the results of
-** a SELECT statement.
-*/
-struct SelectDest {
- u8 eDest; /* How to dispose of the results. On of SRT_* above. */
- char affSdst; /* Affinity used when eDest==SRT_Set */
- int iSDParm; /* A parameter used by the eDest disposal method */
- int iSdst; /* Base register where results are written */
- int nSdst; /* Number of registers allocated */
- ExprList *pOrderBy; /* Key columns for SRT_Queue and SRT_DistQueue */
-};
-
-/*
-** During code generation of statements that do inserts into AUTOINCREMENT
-** tables, the following information is attached to the Table.u.autoInc.p
-** pointer of each autoincrement table to record some side information that
-** the code generator needs. We have to keep per-table autoincrement
-** information in case inserts are done within triggers. Triggers do not
-** normally coordinate their activities, but we do need to coordinate the
-** loading and saving of autoincrement information.
-*/
-struct AutoincInfo {
- AutoincInfo *pNext; /* Next info block in a list of them all */
- Table *pTab; /* Table this info block refers to */
- int iDb; /* Index in sqlite3.aDb[] of database holding pTab */
- int regCtr; /* Memory register holding the rowid counter */
-};
-
-/*
-** Size of the column cache
-*/
-#ifndef SQLITE_N_COLCACHE
-# define SQLITE_N_COLCACHE 10
-#endif
-
-/*
-** At least one instance of the following structure is created for each
-** trigger that may be fired while parsing an INSERT, UPDATE or DELETE
-** statement. All such objects are stored in the linked list headed at
-** Parse.pTriggerPrg and deleted once statement compilation has been
-** completed.
-**
-** A Vdbe sub-program that implements the body and WHEN clause of trigger
-** TriggerPrg.pTrigger, assuming a default ON CONFLICT clause of
-** TriggerPrg.orconf, is stored in the TriggerPrg.pProgram variable.
-** The Parse.pTriggerPrg list never contains two entries with the same
-** values for both pTrigger and orconf.
-**
-** The TriggerPrg.aColmask[0] variable is set to a mask of old.* columns
-** accessed (or set to 0 for triggers fired as a result of INSERT
-** statements). Similarly, the TriggerPrg.aColmask[1] variable is set to
-** a mask of new.* columns used by the program.
-*/
-struct TriggerPrg {
- Trigger *pTrigger; /* Trigger this program was coded from */
- TriggerPrg *pNext; /* Next entry in Parse.pTriggerPrg list */
- SubProgram *pProgram; /* Program implementing pTrigger/orconf */
- int orconf; /* Default ON CONFLICT policy */
- u32 aColmask[2]; /* Masks of old.*, new.* columns accessed */
-};
-
-/*
-** The yDbMask datatype for the bitmask of all attached databases.
-*/
-#if SQLITE_MAX_ATTACHED>30
- typedef unsigned char yDbMask[(SQLITE_MAX_ATTACHED+9)/8];
-# define DbMaskTest(M,I) (((M)[(I)/8]&(1<<((I)&7)))!=0)
-# define DbMaskZero(M) memset((M),0,sizeof(M))
-# define DbMaskSet(M,I) (M)[(I)/8]|=(1<<((I)&7))
-# define DbMaskAllZero(M) sqlite3DbMaskAllZero(M)
-# define DbMaskNonZero(M) (sqlite3DbMaskAllZero(M)==0)
-#else
- typedef unsigned int yDbMask;
-# define DbMaskTest(M,I) (((M)&(((yDbMask)1)<<(I)))!=0)
-# define DbMaskZero(M) (M)=0
-# define DbMaskSet(M,I) (M)|=(((yDbMask)1)<<(I))
-# define DbMaskAllZero(M) (M)==0
-# define DbMaskNonZero(M) (M)!=0
-#endif
-
-/*
-** An SQL parser context. A copy of this structure is passed through
-** the parser and down into all the parser action routine in order to
-** carry around information that is global to the entire parse.
-**
-** The structure is divided into two parts. When the parser and code
-** generate call themselves recursively, the first part of the structure
-** is constant but the second part is reset at the beginning and end of
-** each recursion.
-**
-** The nTableLock and aTableLock variables are only used if the shared-cache
-** feature is enabled (if sqlite3Tsd()->useSharedData is true). They are
-** used to store the set of table-locks required by the statement being
-** compiled. Function sqlite3TableLock() is used to add entries to the
-** list.
-*/
-struct Parse {
- sqlite3 *db; /* The main database structure */
- char *zErrMsg; /* An error message */
- Vdbe *pVdbe; /* An engine for executing database bytecode */
- int rc; /* Return code from execution */
- u8 colNamesSet; /* TRUE after OP_ColumnName has been issued to pVdbe */
- u8 checkSchema; /* Causes schema cookie check after an error */
- u8 nested; /* Number of nested calls to the parser/code generator */
- u8 nTempReg; /* Number of temporary registers in aTempReg[] */
- u8 isMultiWrite; /* True if statement may modify/insert multiple rows */
- u8 mayAbort; /* True if statement may throw an ABORT exception */
- u8 hasCompound; /* Need to invoke convertCompoundSelectToSubquery() */
- u8 okConstFactor; /* OK to factor out constants */
- u8 disableLookaside; /* Number of times lookaside has been disabled */
- u8 nColCache; /* Number of entries in aColCache[] */
- int aTempReg[8]; /* Holding area for temporary registers */
- int nRangeReg; /* Size of the temporary register block */
- int iRangeReg; /* First register in temporary register block */
- int nErr; /* Number of errors seen */
- int nTab; /* Number of previously allocated VDBE cursors */
- int nMem; /* Number of memory cells used so far */
- int nSet; /* Number of sets used so far */
- int nOnce; /* Number of OP_Once instructions so far */
- int nOpAlloc; /* Number of slots allocated for Vdbe.aOp[] */
- int szOpAlloc; /* Bytes of memory space allocated for Vdbe.aOp[] */
- int iFixedOp; /* Never back out opcodes iFixedOp-1 or earlier */
- int ckBase; /* Base register of data during check constraints */
- int iSelfTab; /* Table of an index whose exprs are being coded */
- int iCacheLevel; /* ColCache valid when aColCache[].iLevel<=iCacheLevel */
- int iCacheCnt; /* Counter used to generate aColCache[].lru values */
- int nLabel; /* Number of labels used */
- int *aLabel; /* Space to hold the labels */
- struct yColCache {
- int iTable; /* Table cursor number */
- i16 iColumn; /* Table column number */
- u8 tempReg; /* iReg is a temp register that needs to be freed */
- int iLevel; /* Nesting level */
- int iReg; /* Reg with value of this column. 0 means none. */
- int lru; /* Least recently used entry has the smallest value */
- } aColCache[SQLITE_N_COLCACHE]; /* One for each column cache entry */
- ExprList *pConstExpr;/* Constant expressions */
- Token constraintName;/* Name of the constraint currently being parsed */
- yDbMask writeMask; /* Start a write transaction on these databases */
- yDbMask cookieMask; /* Bitmask of schema verified databases */
- int cookieValue[SQLITE_MAX_ATTACHED+2]; /* Values of cookies to verify */
- int regRowid; /* Register holding rowid of CREATE TABLE entry */
- int regRoot; /* Register holding root page number for new objects */
- int nMaxArg; /* Max args passed to user function by sub-program */
-#if SELECTTRACE_ENABLED
- int nSelect; /* Number of SELECT statements seen */
- int nSelectIndent; /* How far to indent SELECTTRACE() output */
-#endif
-#ifndef SQLITE_OMIT_SHARED_CACHE
- int nTableLock; /* Number of locks in aTableLock */
- TableLock *aTableLock; /* Required table locks for shared-cache mode */
-#endif
- AutoincInfo *pAinc; /* Information about AUTOINCREMENT counters */
-
- /* Information used while coding trigger programs. */
- Parse *pToplevel; /* Parse structure for main program (or NULL) */
- Table *pTriggerTab; /* Table triggers are being coded for */
- int addrCrTab; /* Address of OP_CreateTable opcode on CREATE TABLE */
- u32 nQueryLoop; /* Est number of iterations of a query (10*log2(N)) */
- u32 oldmask; /* Mask of old.* columns referenced */
- u32 newmask; /* Mask of new.* columns referenced */
- u8 eTriggerOp; /* TK_UPDATE, TK_INSERT or TK_DELETE */
- u8 eOrconf; /* Default ON CONFLICT policy for trigger steps */
- u8 disableTriggers; /* True to disable triggers */
-
- /************************************************************************
- ** Above is constant between recursions. Below is reset before and after
- ** each recursion. The boundary between these two regions is determined
- ** using offsetof(Parse,nVar) so the nVar field must be the first field
- ** in the recursive region.
- ************************************************************************/
-
- ynVar nVar; /* Number of '?' variables seen in the SQL so far */
- int nzVar; /* Number of available slots in azVar[] */
- u8 iPkSortOrder; /* ASC or DESC for INTEGER PRIMARY KEY */
- u8 explain; /* True if the EXPLAIN flag is found on the query */
-#ifndef SQLITE_OMIT_VIRTUALTABLE
- u8 declareVtab; /* True if inside sqlite3_declare_vtab() */
- int nVtabLock; /* Number of virtual tables to lock */
-#endif
- int nAlias; /* Number of aliased result set columns */
- int nHeight; /* Expression tree height of current sub-select */
-#ifndef SQLITE_OMIT_EXPLAIN
- int iSelectId; /* ID of current select for EXPLAIN output */
- int iNextSelectId; /* Next available select ID for EXPLAIN output */
-#endif
- char **azVar; /* Pointers to names of parameters */
- Vdbe *pReprepare; /* VM being reprepared (sqlite3Reprepare()) */
- const char *zTail; /* All SQL text past the last semicolon parsed */
- Table *pNewTable; /* A table being constructed by CREATE TABLE */
- Trigger *pNewTrigger; /* Trigger under construct by a CREATE TRIGGER */
- const char *zAuthContext; /* The 6th parameter to db->xAuth callbacks */
- Token sNameToken; /* Token with unqualified schema object name */
- Token sLastToken; /* The last token parsed */
-#ifndef SQLITE_OMIT_VIRTUALTABLE
- Token sArg; /* Complete text of a module argument */
- Table **apVtabLock; /* Pointer to virtual tables needing locking */
-#endif
- Table *pZombieTab; /* List of Table objects to delete after code gen */
- TriggerPrg *pTriggerPrg; /* Linked list of coded triggers */
- With *pWith; /* Current WITH clause, or NULL */
- With *pWithToFree; /* Free this WITH object at the end of the parse */
-};
-
-/*
-** Return true if currently inside an sqlite3_declare_vtab() call.
-*/
-#ifdef SQLITE_OMIT_VIRTUALTABLE
- #define IN_DECLARE_VTAB 0
-#else
- #define IN_DECLARE_VTAB (pParse->declareVtab)
-#endif
-
-/*
-** An instance of the following structure can be declared on a stack and used
-** to save the Parse.zAuthContext value so that it can be restored later.
-*/
-struct AuthContext {
- const char *zAuthContext; /* Put saved Parse.zAuthContext here */
- Parse *pParse; /* The Parse structure */
-};
-
-/*
-** Bitfield flags for P5 value in various opcodes.
-**
-** Value constraints (enforced via assert()):
-** OPFLAG_LENGTHARG == SQLITE_FUNC_LENGTH
-** OPFLAG_TYPEOFARG == SQLITE_FUNC_TYPEOF
-** OPFLAG_BULKCSR == BTREE_BULKLOAD
-** OPFLAG_SEEKEQ == BTREE_SEEK_EQ
-** OPFLAG_FORDELETE == BTREE_FORDELETE
-** OPFLAG_SAVEPOSITION == BTREE_SAVEPOSITION
-** OPFLAG_AUXDELETE == BTREE_AUXDELETE
-*/
-#define OPFLAG_NCHANGE 0x01 /* OP_Insert: Set to update db->nChange */
- /* Also used in P2 (not P5) of OP_Delete */
-#define OPFLAG_EPHEM 0x01 /* OP_Column: Ephemeral output is ok */
-#define OPFLAG_LASTROWID 0x02 /* Set to update db->lastRowid */
-#define OPFLAG_ISUPDATE 0x04 /* This OP_Insert is an sql UPDATE */
-#define OPFLAG_APPEND 0x08 /* This is likely to be an append */
-#define OPFLAG_USESEEKRESULT 0x10 /* Try to avoid a seek in BtreeInsert() */
-#ifdef SQLITE_ENABLE_PREUPDATE_HOOK
-#define OPFLAG_ISNOOP 0x40 /* OP_Delete does pre-update-hook only */
-#endif
-#define OPFLAG_LENGTHARG 0x40 /* OP_Column only used for length() */
-#define OPFLAG_TYPEOFARG 0x80 /* OP_Column only used for typeof() */
-#define OPFLAG_BULKCSR 0x01 /* OP_Open** used to open bulk cursor */
-#define OPFLAG_SEEKEQ 0x02 /* OP_Open** cursor uses EQ seek only */
-#define OPFLAG_FORDELETE 0x08 /* OP_Open should use BTREE_FORDELETE */
-#define OPFLAG_P2ISREG 0x10 /* P2 to OP_Open** is a register number */
-#define OPFLAG_PERMUTE 0x01 /* OP_Compare: use the permutation */
-#define OPFLAG_SAVEPOSITION 0x02 /* OP_Delete: keep cursor position */
-#define OPFLAG_AUXDELETE 0x04 /* OP_Delete: index in a DELETE op */
-
-/*
- * Each trigger present in the database schema is stored as an instance of
- * struct Trigger.
- *
- * Pointers to instances of struct Trigger are stored in two ways.
- * 1. In the "trigHash" hash table (part of the sqlite3* that represents the
- * database). This allows Trigger structures to be retrieved by name.
- * 2. All triggers associated with a single table form a linked list, using the
- * pNext member of struct Trigger. A pointer to the first element of the
- * linked list is stored as the "pTrigger" member of the associated
- * struct Table.
- *
- * The "step_list" member points to the first element of a linked list
- * containing the SQL statements specified as the trigger program.
- */
-struct Trigger {
- char *zName; /* The name of the trigger */
- char *table; /* The table or view to which the trigger applies */
- u8 op; /* One of TK_DELETE, TK_UPDATE, TK_INSERT */
- u8 tr_tm; /* One of TRIGGER_BEFORE, TRIGGER_AFTER */
- Expr *pWhen; /* The WHEN clause of the expression (may be NULL) */
- IdList *pColumns; /* If this is an UPDATE OF trigger,
- the is stored here */
- Schema *pSchema; /* Schema containing the trigger */
- Schema *pTabSchema; /* Schema containing the table */
- TriggerStep *step_list; /* Link list of trigger program steps */
- Trigger *pNext; /* Next trigger associated with the table */
-};
-
-/*
-** A trigger is either a BEFORE or an AFTER trigger. The following constants
-** determine which.
-**
-** If there are multiple triggers, you might of some BEFORE and some AFTER.
-** In that cases, the constants below can be ORed together.
-*/
-#define TRIGGER_BEFORE 1
-#define TRIGGER_AFTER 2
-
-/*
- * An instance of struct TriggerStep is used to store a single SQL statement
- * that is a part of a trigger-program.
- *
- * Instances of struct TriggerStep are stored in a singly linked list (linked
- * using the "pNext" member) referenced by the "step_list" member of the
- * associated struct Trigger instance. The first element of the linked list is
- * the first step of the trigger-program.
- *
- * The "op" member indicates whether this is a "DELETE", "INSERT", "UPDATE" or
- * "SELECT" statement. The meanings of the other members is determined by the
- * value of "op" as follows:
- *
- * (op == TK_INSERT)
- * orconf -> stores the ON CONFLICT algorithm
- * pSelect -> If this is an INSERT INTO ... SELECT ... statement, then
- * this stores a pointer to the SELECT statement. Otherwise NULL.
- * zTarget -> Dequoted name of the table to insert into.
- * pExprList -> If this is an INSERT INTO ... VALUES ... statement, then
- * this stores values to be inserted. Otherwise NULL.
- * pIdList -> If this is an INSERT INTO ... () VALUES ...
- * statement, then this stores the column-names to be
- * inserted into.
- *
- * (op == TK_DELETE)
- * zTarget -> Dequoted name of the table to delete from.
- * pWhere -> The WHERE clause of the DELETE statement if one is specified.
- * Otherwise NULL.
- *
- * (op == TK_UPDATE)
- * zTarget -> Dequoted name of the table to update.
- * pWhere -> The WHERE clause of the UPDATE statement if one is specified.
- * Otherwise NULL.
- * pExprList -> A list of the columns to update and the expressions to update
- * them to. See sqlite3Update() documentation of "pChanges"
- * argument.
- *
- */
-struct TriggerStep {
- u8 op; /* One of TK_DELETE, TK_UPDATE, TK_INSERT, TK_SELECT */
- u8 orconf; /* OE_Rollback etc. */
- Trigger *pTrig; /* The trigger that this step is a part of */
- Select *pSelect; /* SELECT statement or RHS of INSERT INTO SELECT ... */
- char *zTarget; /* Target table for DELETE, UPDATE, INSERT */
- Expr *pWhere; /* The WHERE clause for DELETE or UPDATE steps */
- ExprList *pExprList; /* SET clause for UPDATE. */
- IdList *pIdList; /* Column names for INSERT */
- TriggerStep *pNext; /* Next in the link-list */
- TriggerStep *pLast; /* Last element in link-list. Valid for 1st elem only */
-};
-
-/*
-** The following structure contains information used by the sqliteFix...
-** routines as they walk the parse tree to make database references
-** explicit.
-*/
-typedef struct DbFixer DbFixer;
-struct DbFixer {
- Parse *pParse; /* The parsing context. Error messages written here */
- Schema *pSchema; /* Fix items to this schema */
- int bVarOnly; /* Check for variable references only */
- const char *zDb; /* Make sure all objects are contained in this database */
- const char *zType; /* Type of the container - used for error messages */
- const Token *pName; /* Name of the container - used for error messages */
-};
-
-/*
-** An objected used to accumulate the text of a string where we
-** do not necessarily know how big the string will be in the end.
-*/
-struct StrAccum {
- sqlite3 *db; /* Optional database for lookaside. Can be NULL */
- char *zBase; /* A base allocation. Not from malloc. */
- char *zText; /* The string collected so far */
- u32 nChar; /* Length of the string so far */
- u32 nAlloc; /* Amount of space allocated in zText */
- u32 mxAlloc; /* Maximum allowed allocation. 0 for no malloc usage */
- u8 accError; /* STRACCUM_NOMEM or STRACCUM_TOOBIG */
- u8 printfFlags; /* SQLITE_PRINTF flags below */
-};
-#define STRACCUM_NOMEM 1
-#define STRACCUM_TOOBIG 2
-#define SQLITE_PRINTF_INTERNAL 0x01 /* Internal-use-only converters allowed */
-#define SQLITE_PRINTF_SQLFUNC 0x02 /* SQL function arguments to VXPrintf */
-#define SQLITE_PRINTF_MALLOCED 0x04 /* True if xText is allocated space */
-
-#define isMalloced(X) (((X)->printfFlags & SQLITE_PRINTF_MALLOCED)!=0)
-
-
-/*
-** A pointer to this structure is used to communicate information
-** from sqlite3Init and OP_ParseSchema into the sqlite3InitCallback.
-*/
-typedef struct {
- sqlite3 *db; /* The database being initialized */
- char **pzErrMsg; /* Error message stored here */
- int iDb; /* 0 for main database. 1 for TEMP, 2.. for ATTACHed */
- int rc; /* Result code stored here */
-} InitData;
-
-/*
-** Structure containing global configuration data for the SQLite library.
-**
-** This structure also contains some state information.
-*/
-struct Sqlite3Config {
- int bMemstat; /* True to enable memory status */
- int bCoreMutex; /* True to enable core mutexing */
- int bFullMutex; /* True to enable full mutexing */
- int bOpenUri; /* True to interpret filenames as URIs */
- int bUseCis; /* Use covering indices for full-scans */
- int mxStrlen; /* Maximum string length */
- int neverCorrupt; /* Database is always well-formed */
- int szLookaside; /* Default lookaside buffer size */
- int nLookaside; /* Default lookaside buffer count */
- int nStmtSpill; /* Stmt-journal spill-to-disk threshold */
- sqlite3_mem_methods m; /* Low-level memory allocation interface */
- sqlite3_mutex_methods mutex; /* Low-level mutex interface */
- sqlite3_pcache_methods2 pcache2; /* Low-level page-cache interface */
- void *pHeap; /* Heap storage space */
- int nHeap; /* Size of pHeap[] */
- int mnReq, mxReq; /* Min and max heap requests sizes */
- sqlite3_int64 szMmap; /* mmap() space per open file */
- sqlite3_int64 mxMmap; /* Maximum value for szMmap */
- void *pScratch; /* Scratch memory */
- int szScratch; /* Size of each scratch buffer */
- int nScratch; /* Number of scratch buffers */
- void *pPage; /* Page cache memory */
- int szPage; /* Size of each page in pPage[] */
- int nPage; /* Number of pages in pPage[] */
- int mxParserStack; /* maximum depth of the parser stack */
- int sharedCacheEnabled; /* true if shared-cache mode enabled */
- u32 szPma; /* Maximum Sorter PMA size */
- /* The above might be initialized to non-zero. The following need to always
- ** initially be zero, however. */
- int isInit; /* True after initialization has finished */
- int inProgress; /* True while initialization in progress */
- int isMutexInit; /* True after mutexes are initialized */
- int isMallocInit; /* True after malloc is initialized */
- int isPCacheInit; /* True after malloc is initialized */
- int nRefInitMutex; /* Number of users of pInitMutex */
- sqlite3_mutex *pInitMutex; /* Mutex used by sqlite3_initialize() */
- void (*xLog)(void*,int,const char*); /* Function for logging */
- void *pLogArg; /* First argument to xLog() */
-#ifdef SQLITE_ENABLE_SQLLOG
- void(*xSqllog)(void*,sqlite3*,const char*, int);
- void *pSqllogArg;
-#endif
-#ifdef SQLITE_VDBE_COVERAGE
- /* The following callback (if not NULL) is invoked on every VDBE branch
- ** operation. Set the callback using SQLITE_TESTCTRL_VDBE_COVERAGE.
- */
- void (*xVdbeBranch)(void*,int iSrcLine,u8 eThis,u8 eMx); /* Callback */
- void *pVdbeBranchArg; /* 1st argument */
-#endif
-#ifndef SQLITE_OMIT_BUILTIN_TEST
- int (*xTestCallback)(int); /* Invoked by sqlite3FaultSim() */
-#endif
- int bLocaltimeFault; /* True to fail localtime() calls */
-};
-
-/*
-** This macro is used inside of assert() statements to indicate that
-** the assert is only valid on a well-formed database. Instead of:
-**
-** assert( X );
-**
-** One writes:
-**
-** assert( X || CORRUPT_DB );
-**
-** CORRUPT_DB is true during normal operation. CORRUPT_DB does not indicate
-** that the database is definitely corrupt, only that it might be corrupt.
-** For most test cases, CORRUPT_DB is set to false using a special
-** sqlite3_test_control(). This enables assert() statements to prove
-** things that are always true for well-formed databases.
-*/
-#define CORRUPT_DB (sqlite3Config.neverCorrupt==0)
-
-/*
-** Context pointer passed down through the tree-walk.
-*/
-struct Walker {
- Parse *pParse; /* Parser context. */
- int (*xExprCallback)(Walker*, Expr*); /* Callback for expressions */
- int (*xSelectCallback)(Walker*,Select*); /* Callback for SELECTs */
- void (*xSelectCallback2)(Walker*,Select*);/* Second callback for SELECTs */
- int walkerDepth; /* Number of subqueries */
- u8 eCode; /* A small processing code */
- union { /* Extra data for callback */
- NameContext *pNC; /* Naming context */
- int n; /* A counter */
- int iCur; /* A cursor number */
- SrcList *pSrcList; /* FROM clause */
- struct SrcCount *pSrcCount; /* Counting column references */
- struct CCurHint *pCCurHint; /* Used by codeCursorHint() */
- int *aiCol; /* array of column indexes */
- struct IdxCover *pIdxCover; /* Check for index coverage */
- } u;
-};
-
-/* Forward declarations */
-SQLITE_PRIVATE int sqlite3WalkExpr(Walker*, Expr*);
-SQLITE_PRIVATE int sqlite3WalkExprList(Walker*, ExprList*);
-SQLITE_PRIVATE int sqlite3WalkSelect(Walker*, Select*);
-SQLITE_PRIVATE int sqlite3WalkSelectExpr(Walker*, Select*);
-SQLITE_PRIVATE int sqlite3WalkSelectFrom(Walker*, Select*);
-SQLITE_PRIVATE int sqlite3ExprWalkNoop(Walker*, Expr*);
-
-/*
-** Return code from the parse-tree walking primitives and their
-** callbacks.
-*/
-#define WRC_Continue 0 /* Continue down into children */
-#define WRC_Prune 1 /* Omit children but continue walking siblings */
-#define WRC_Abort 2 /* Abandon the tree walk */
-
-/*
-** An instance of this structure represents a set of one or more CTEs
-** (common table expressions) created by a single WITH clause.
-*/
-struct With {
- int nCte; /* Number of CTEs in the WITH clause */
- With *pOuter; /* Containing WITH clause, or NULL */
- struct Cte { /* For each CTE in the WITH clause.... */
- char *zName; /* Name of this CTE */
- ExprList *pCols; /* List of explicit column names, or NULL */
- Select *pSelect; /* The definition of this CTE */
- const char *zCteErr; /* Error message for circular references */
- } a[1];
-};
-
-#ifdef SQLITE_DEBUG
-/*
-** An instance of the TreeView object is used for printing the content of
-** data structures on sqlite3DebugPrintf() using a tree-like view.
-*/
-struct TreeView {
- int iLevel; /* Which level of the tree we are on */
- u8 bLine[100]; /* Draw vertical in column i if bLine[i] is true */
-};
-#endif /* SQLITE_DEBUG */
-
-/*
-** Assuming zIn points to the first byte of a UTF-8 character,
-** advance zIn to point to the first byte of the next UTF-8 character.
-*/
-#define SQLITE_SKIP_UTF8(zIn) { \
- if( (*(zIn++))>=0xc0 ){ \
- while( (*zIn & 0xc0)==0x80 ){ zIn++; } \
- } \
-}
-
-/*
-** The SQLITE_*_BKPT macros are substitutes for the error codes with
-** the same name but without the _BKPT suffix. These macros invoke
-** routines that report the line-number on which the error originated
-** using sqlite3_log(). The routines also provide a convenient place
-** to set a debugger breakpoint.
-*/
-SQLITE_PRIVATE int sqlite3CorruptError(int);
-SQLITE_PRIVATE int sqlite3MisuseError(int);
-SQLITE_PRIVATE int sqlite3CantopenError(int);
-#define SQLITE_CORRUPT_BKPT sqlite3CorruptError(__LINE__)
-#define SQLITE_MISUSE_BKPT sqlite3MisuseError(__LINE__)
-#define SQLITE_CANTOPEN_BKPT sqlite3CantopenError(__LINE__)
-#ifdef SQLITE_DEBUG
-SQLITE_PRIVATE int sqlite3NomemError(int);
-SQLITE_PRIVATE int sqlite3IoerrnomemError(int);
-# define SQLITE_NOMEM_BKPT sqlite3NomemError(__LINE__)
-# define SQLITE_IOERR_NOMEM_BKPT sqlite3IoerrnomemError(__LINE__)
-#else
-# define SQLITE_NOMEM_BKPT SQLITE_NOMEM
-# define SQLITE_IOERR_NOMEM_BKPT SQLITE_IOERR_NOMEM
-#endif
-
-/*
-** FTS3 and FTS4 both require virtual table support
-*/
-#if defined(SQLITE_OMIT_VIRTUALTABLE)
-# undef SQLITE_ENABLE_FTS3
-# undef SQLITE_ENABLE_FTS4
-#endif
-
-/*
-** FTS4 is really an extension for FTS3. It is enabled using the
-** SQLITE_ENABLE_FTS3 macro. But to avoid confusion we also call
-** the SQLITE_ENABLE_FTS4 macro to serve as an alias for SQLITE_ENABLE_FTS3.
-*/
-#if defined(SQLITE_ENABLE_FTS4) && !defined(SQLITE_ENABLE_FTS3)
-# define SQLITE_ENABLE_FTS3 1
-#endif
-
-/*
-** The ctype.h header is needed for non-ASCII systems. It is also
-** needed by FTS3 when FTS3 is included in the amalgamation.
-*/
-#if !defined(SQLITE_ASCII) || \
- (defined(SQLITE_ENABLE_FTS3) && defined(SQLITE_AMALGAMATION))
-# include
-#endif
-
-/*
-** The following macros mimic the standard library functions toupper(),
-** isspace(), isalnum(), isdigit() and isxdigit(), respectively. The
-** sqlite versions only work for ASCII characters, regardless of locale.
-*/
-#ifdef SQLITE_ASCII
-# define sqlite3Toupper(x) ((x)&~(sqlite3CtypeMap[(unsigned char)(x)]&0x20))
-# define sqlite3Isspace(x) (sqlite3CtypeMap[(unsigned char)(x)]&0x01)
-# define sqlite3Isalnum(x) (sqlite3CtypeMap[(unsigned char)(x)]&0x06)
-# define sqlite3Isalpha(x) (sqlite3CtypeMap[(unsigned char)(x)]&0x02)
-# define sqlite3Isdigit(x) (sqlite3CtypeMap[(unsigned char)(x)]&0x04)
-# define sqlite3Isxdigit(x) (sqlite3CtypeMap[(unsigned char)(x)]&0x08)
-# define sqlite3Tolower(x) (sqlite3UpperToLower[(unsigned char)(x)])
-# define sqlite3Isquote(x) (sqlite3CtypeMap[(unsigned char)(x)]&0x80)
-#else
-# define sqlite3Toupper(x) toupper((unsigned char)(x))
-# define sqlite3Isspace(x) isspace((unsigned char)(x))
-# define sqlite3Isalnum(x) isalnum((unsigned char)(x))
-# define sqlite3Isalpha(x) isalpha((unsigned char)(x))
-# define sqlite3Isdigit(x) isdigit((unsigned char)(x))
-# define sqlite3Isxdigit(x) isxdigit((unsigned char)(x))
-# define sqlite3Tolower(x) tolower((unsigned char)(x))
-# define sqlite3Isquote(x) ((x)=='"'||(x)=='\''||(x)=='['||(x)=='`')
-#endif
-#ifndef SQLITE_OMIT_COMPILEOPTION_DIAGS
-SQLITE_PRIVATE int sqlite3IsIdChar(u8);
-#endif
-
-/*
-** Internal function prototypes
-*/
-SQLITE_PRIVATE int sqlite3StrICmp(const char*,const char*);
-SQLITE_PRIVATE int sqlite3Strlen30(const char*);
-SQLITE_PRIVATE char *sqlite3ColumnType(Column*,char*);
-#define sqlite3StrNICmp sqlite3_strnicmp
-
-SQLITE_PRIVATE int sqlite3MallocInit(void);
-SQLITE_PRIVATE void sqlite3MallocEnd(void);
-SQLITE_PRIVATE void *sqlite3Malloc(u64);
-SQLITE_PRIVATE void *sqlite3MallocZero(u64);
-SQLITE_PRIVATE void *sqlite3DbMallocZero(sqlite3*, u64);
-SQLITE_PRIVATE void *sqlite3DbMallocRaw(sqlite3*, u64);
-SQLITE_PRIVATE void *sqlite3DbMallocRawNN(sqlite3*, u64);
-SQLITE_PRIVATE char *sqlite3DbStrDup(sqlite3*,const char*);
-SQLITE_PRIVATE char *sqlite3DbStrNDup(sqlite3*,const char*, u64);
-SQLITE_PRIVATE void *sqlite3Realloc(void*, u64);
-SQLITE_PRIVATE void *sqlite3DbReallocOrFree(sqlite3 *, void *, u64);
-SQLITE_PRIVATE void *sqlite3DbRealloc(sqlite3 *, void *, u64);
-SQLITE_PRIVATE void sqlite3DbFree(sqlite3*, void*);
-SQLITE_PRIVATE int sqlite3MallocSize(void*);
-SQLITE_PRIVATE int sqlite3DbMallocSize(sqlite3*, void*);
-SQLITE_PRIVATE void *sqlite3ScratchMalloc(int);
-SQLITE_PRIVATE void sqlite3ScratchFree(void*);
-SQLITE_PRIVATE void *sqlite3PageMalloc(int);
-SQLITE_PRIVATE void sqlite3PageFree(void*);
-SQLITE_PRIVATE void sqlite3MemSetDefault(void);
-#ifndef SQLITE_OMIT_BUILTIN_TEST
-SQLITE_PRIVATE void sqlite3BenignMallocHooks(void (*)(void), void (*)(void));
-#endif
-SQLITE_PRIVATE int sqlite3HeapNearlyFull(void);
-
-/*
-** On systems with ample stack space and that support alloca(), make
-** use of alloca() to obtain space for large automatic objects. By default,
-** obtain space from malloc().
-**
-** The alloca() routine never returns NULL. This will cause code paths
-** that deal with sqlite3StackAlloc() failures to be unreachable.
-*/
-#ifdef SQLITE_USE_ALLOCA
-# define sqlite3StackAllocRaw(D,N) alloca(N)
-# define sqlite3StackAllocZero(D,N) memset(alloca(N), 0, N)
-# define sqlite3StackFree(D,P)
-#else
-# define sqlite3StackAllocRaw(D,N) sqlite3DbMallocRaw(D,N)
-# define sqlite3StackAllocZero(D,N) sqlite3DbMallocZero(D,N)
-# define sqlite3StackFree(D,P) sqlite3DbFree(D,P)
-#endif
-
-/* Do not allow both MEMSYS5 and MEMSYS3 to be defined together. If they
-** are, disable MEMSYS3
-*/
-#ifdef SQLITE_ENABLE_MEMSYS5
-SQLITE_PRIVATE const sqlite3_mem_methods *sqlite3MemGetMemsys5(void);
-#undef SQLITE_ENABLE_MEMSYS3
-#endif
-#ifdef SQLITE_ENABLE_MEMSYS3
-SQLITE_PRIVATE const sqlite3_mem_methods *sqlite3MemGetMemsys3(void);
-#endif
-
-
-#ifndef SQLITE_MUTEX_OMIT
-SQLITE_PRIVATE sqlite3_mutex_methods const *sqlite3DefaultMutex(void);
-SQLITE_PRIVATE sqlite3_mutex_methods const *sqlite3NoopMutex(void);
-SQLITE_PRIVATE sqlite3_mutex *sqlite3MutexAlloc(int);
-SQLITE_PRIVATE int sqlite3MutexInit(void);
-SQLITE_PRIVATE int sqlite3MutexEnd(void);
-#endif
-#if !defined(SQLITE_MUTEX_OMIT) && !defined(SQLITE_MUTEX_NOOP)
-SQLITE_PRIVATE void sqlite3MemoryBarrier(void);
-#else
-# define sqlite3MemoryBarrier()
-#endif
-
-SQLITE_PRIVATE sqlite3_int64 sqlite3StatusValue(int);
-SQLITE_PRIVATE void sqlite3StatusUp(int, int);
-SQLITE_PRIVATE void sqlite3StatusDown(int, int);
-SQLITE_PRIVATE void sqlite3StatusHighwater(int, int);
-
-/* Access to mutexes used by sqlite3_status() */
-SQLITE_PRIVATE sqlite3_mutex *sqlite3Pcache1Mutex(void);
-SQLITE_PRIVATE sqlite3_mutex *sqlite3MallocMutex(void);
-
-#ifndef SQLITE_OMIT_FLOATING_POINT
-SQLITE_PRIVATE int sqlite3IsNaN(double);
-#else
-# define sqlite3IsNaN(X) 0
-#endif
-
-/*
-** An instance of the following structure holds information about SQL
-** functions arguments that are the parameters to the printf() function.
-*/
-struct PrintfArguments {
- int nArg; /* Total number of arguments */
- int nUsed; /* Number of arguments used so far */
- sqlite3_value **apArg; /* The argument values */
-};
-
-SQLITE_PRIVATE void sqlite3VXPrintf(StrAccum*, const char*, va_list);
-SQLITE_PRIVATE void sqlite3XPrintf(StrAccum*, const char*, ...);
-SQLITE_PRIVATE char *sqlite3MPrintf(sqlite3*,const char*, ...);
-SQLITE_PRIVATE char *sqlite3VMPrintf(sqlite3*,const char*, va_list);
-#if defined(SQLITE_DEBUG) || defined(SQLITE_HAVE_OS_TRACE)
-SQLITE_PRIVATE void sqlite3DebugPrintf(const char*, ...);
-#endif
-#if defined(SQLITE_TEST)
-SQLITE_PRIVATE void *sqlite3TestTextToPtr(const char*);
-#endif
-
-#if defined(SQLITE_DEBUG)
-SQLITE_PRIVATE void sqlite3TreeViewExpr(TreeView*, const Expr*, u8);
-SQLITE_PRIVATE void sqlite3TreeViewExprList(TreeView*, const ExprList*, u8, const char*);
-SQLITE_PRIVATE void sqlite3TreeViewSelect(TreeView*, const Select*, u8);
-SQLITE_PRIVATE void sqlite3TreeViewWith(TreeView*, const With*, u8);
-#endif
-
-
-SQLITE_PRIVATE void sqlite3SetString(char **, sqlite3*, const char*);
-SQLITE_PRIVATE void sqlite3ErrorMsg(Parse*, const char*, ...);
-SQLITE_PRIVATE void sqlite3Dequote(char*);
-SQLITE_PRIVATE void sqlite3TokenInit(Token*,char*);
-SQLITE_PRIVATE int sqlite3KeywordCode(const unsigned char*, int);
-SQLITE_PRIVATE int sqlite3RunParser(Parse*, const char*, char **);
-SQLITE_PRIVATE void sqlite3FinishCoding(Parse*);
-SQLITE_PRIVATE int sqlite3GetTempReg(Parse*);
-SQLITE_PRIVATE void sqlite3ReleaseTempReg(Parse*,int);
-SQLITE_PRIVATE int sqlite3GetTempRange(Parse*,int);
-SQLITE_PRIVATE void sqlite3ReleaseTempRange(Parse*,int,int);
-SQLITE_PRIVATE void sqlite3ClearTempRegCache(Parse*);
-#ifdef SQLITE_DEBUG
-SQLITE_PRIVATE int sqlite3NoTempsInRange(Parse*,int,int);
-#endif
-SQLITE_PRIVATE Expr *sqlite3ExprAlloc(sqlite3*,int,const Token*,int);
-SQLITE_PRIVATE Expr *sqlite3Expr(sqlite3*,int,const char*);
-SQLITE_PRIVATE void sqlite3ExprAttachSubtrees(sqlite3*,Expr*,Expr*,Expr*);
-SQLITE_PRIVATE Expr *sqlite3PExpr(Parse*, int, Expr*, Expr*, const Token*);
-SQLITE_PRIVATE void sqlite3PExprAddSelect(Parse*, Expr*, Select*);
-SQLITE_PRIVATE Expr *sqlite3ExprAnd(sqlite3*,Expr*, Expr*);
-SQLITE_PRIVATE Expr *sqlite3ExprFunction(Parse*,ExprList*, Token*);
-SQLITE_PRIVATE void sqlite3ExprAssignVarNumber(Parse*, Expr*);
-SQLITE_PRIVATE void sqlite3ExprDelete(sqlite3*, Expr*);
-SQLITE_PRIVATE ExprList *sqlite3ExprListAppend(Parse*,ExprList*,Expr*);
-SQLITE_PRIVATE void sqlite3ExprListSetSortOrder(ExprList*,int);
-SQLITE_PRIVATE void sqlite3ExprListSetName(Parse*,ExprList*,Token*,int);
-SQLITE_PRIVATE void sqlite3ExprListSetSpan(Parse*,ExprList*,ExprSpan*);
-SQLITE_PRIVATE void sqlite3ExprListDelete(sqlite3*, ExprList*);
-SQLITE_PRIVATE u32 sqlite3ExprListFlags(const ExprList*);
-SQLITE_PRIVATE int sqlite3Init(sqlite3*, char**);
-SQLITE_PRIVATE int sqlite3InitCallback(void*, int, char**, char**);
-SQLITE_PRIVATE void sqlite3Pragma(Parse*,Token*,Token*,Token*,int);
-SQLITE_PRIVATE void sqlite3ResetAllSchemasOfConnection(sqlite3*);
-SQLITE_PRIVATE void sqlite3ResetOneSchema(sqlite3*,int);
-SQLITE_PRIVATE void sqlite3CollapseDatabaseArray(sqlite3*);
-SQLITE_PRIVATE void sqlite3CommitInternalChanges(sqlite3*);
-SQLITE_PRIVATE void sqlite3DeleteColumnNames(sqlite3*,Table*);
-SQLITE_PRIVATE int sqlite3ColumnsFromExprList(Parse*,ExprList*,i16*,Column**);
-SQLITE_PRIVATE void sqlite3SelectAddColumnTypeAndCollation(Parse*,Table*,Select*);
-SQLITE_PRIVATE Table *sqlite3ResultSetOfSelect(Parse*,Select*);
-SQLITE_PRIVATE void sqlite3OpenMasterTable(Parse *, int);
-SQLITE_PRIVATE Index *sqlite3PrimaryKeyIndex(Table*);
-SQLITE_PRIVATE i16 sqlite3ColumnOfIndex(Index*, i16);
-SQLITE_PRIVATE void sqlite3StartTable(Parse*,Token*,Token*,int,int,int,int);
-#if SQLITE_ENABLE_HIDDEN_COLUMNS
-SQLITE_PRIVATE void sqlite3ColumnPropertiesFromName(Table*, Column*);
-#else
-# define sqlite3ColumnPropertiesFromName(T,C) /* no-op */
-#endif
-SQLITE_PRIVATE void sqlite3AddColumn(Parse*,Token*,Token*);
-SQLITE_PRIVATE void sqlite3AddNotNull(Parse*, int);
-SQLITE_PRIVATE void sqlite3AddPrimaryKey(Parse*, ExprList*, int, int, int);
-SQLITE_PRIVATE void sqlite3AddCheckConstraint(Parse*, Expr*);
-SQLITE_PRIVATE void sqlite3AddDefaultValue(Parse*,ExprSpan*);
-SQLITE_PRIVATE void sqlite3AddCollateType(Parse*, Token*);
-SQLITE_PRIVATE void sqlite3EndTable(Parse*,Token*,Token*,u8,Select*);
-SQLITE_PRIVATE int sqlite3ParseUri(const char*,const char*,unsigned int*,
- sqlite3_vfs**,char**,char **);
-SQLITE_PRIVATE Btree *sqlite3DbNameToBtree(sqlite3*,const char*);
-SQLITE_PRIVATE int sqlite3CodeOnce(Parse *);
-
-#ifdef SQLITE_OMIT_BUILTIN_TEST
-# define sqlite3FaultSim(X) SQLITE_OK
-#else
-SQLITE_PRIVATE int sqlite3FaultSim(int);
-#endif
-
-SQLITE_PRIVATE Bitvec *sqlite3BitvecCreate(u32);
-SQLITE_PRIVATE int sqlite3BitvecTest(Bitvec*, u32);
-SQLITE_PRIVATE int sqlite3BitvecTestNotNull(Bitvec*, u32);
-SQLITE_PRIVATE int sqlite3BitvecSet(Bitvec*, u32);
-SQLITE_PRIVATE void sqlite3BitvecClear(Bitvec*, u32, void*);
-SQLITE_PRIVATE void sqlite3BitvecDestroy(Bitvec*);
-SQLITE_PRIVATE u32 sqlite3BitvecSize(Bitvec*);
-#ifndef SQLITE_OMIT_BUILTIN_TEST
-SQLITE_PRIVATE int sqlite3BitvecBuiltinTest(int,int*);
-#endif
-
-SQLITE_PRIVATE RowSet *sqlite3RowSetInit(sqlite3*, void*, unsigned int);
-SQLITE_PRIVATE void sqlite3RowSetClear(RowSet*);
-SQLITE_PRIVATE void sqlite3RowSetInsert(RowSet*, i64);
-SQLITE_PRIVATE int sqlite3RowSetTest(RowSet*, int iBatch, i64);
-SQLITE_PRIVATE int sqlite3RowSetNext(RowSet*, i64*);
-
-SQLITE_PRIVATE void sqlite3CreateView(Parse*,Token*,Token*,Token*,ExprList*,Select*,int,int);
-
-#if !defined(SQLITE_OMIT_VIEW) || !defined(SQLITE_OMIT_VIRTUALTABLE)
-SQLITE_PRIVATE int sqlite3ViewGetColumnNames(Parse*,Table*);
-#else
-# define sqlite3ViewGetColumnNames(A,B) 0
-#endif
-
-#if SQLITE_MAX_ATTACHED>30
-SQLITE_PRIVATE int sqlite3DbMaskAllZero(yDbMask);
-#endif
-SQLITE_PRIVATE void sqlite3DropTable(Parse*, SrcList*, int, int);
-SQLITE_PRIVATE void sqlite3CodeDropTable(Parse*, Table*, int, int);
-SQLITE_PRIVATE void sqlite3DeleteTable(sqlite3*, Table*);
-#ifndef SQLITE_OMIT_AUTOINCREMENT
-SQLITE_PRIVATE void sqlite3AutoincrementBegin(Parse *pParse);
-SQLITE_PRIVATE void sqlite3AutoincrementEnd(Parse *pParse);
-#else
-# define sqlite3AutoincrementBegin(X)
-# define sqlite3AutoincrementEnd(X)
-#endif
-SQLITE_PRIVATE void sqlite3Insert(Parse*, SrcList*, Select*, IdList*, int);
-SQLITE_PRIVATE void *sqlite3ArrayAllocate(sqlite3*,void*,int,int*,int*);
-SQLITE_PRIVATE IdList *sqlite3IdListAppend(sqlite3*, IdList*, Token*);
-SQLITE_PRIVATE int sqlite3IdListIndex(IdList*,const char*);
-SQLITE_PRIVATE SrcList *sqlite3SrcListEnlarge(sqlite3*, SrcList*, int, int);
-SQLITE_PRIVATE SrcList *sqlite3SrcListAppend(sqlite3*, SrcList*, Token*, Token*);
-SQLITE_PRIVATE SrcList *sqlite3SrcListAppendFromTerm(Parse*, SrcList*, Token*, Token*,
- Token*, Select*, Expr*, IdList*);
-SQLITE_PRIVATE void sqlite3SrcListIndexedBy(Parse *, SrcList *, Token *);
-SQLITE_PRIVATE void sqlite3SrcListFuncArgs(Parse*, SrcList*, ExprList*);
-SQLITE_PRIVATE int sqlite3IndexedByLookup(Parse *, struct SrcList_item *);
-SQLITE_PRIVATE void sqlite3SrcListShiftJoinType(SrcList*);
-SQLITE_PRIVATE void sqlite3SrcListAssignCursors(Parse*, SrcList*);
-SQLITE_PRIVATE void sqlite3IdListDelete(sqlite3*, IdList*);
-SQLITE_PRIVATE void sqlite3SrcListDelete(sqlite3*, SrcList*);
-SQLITE_PRIVATE Index *sqlite3AllocateIndexObject(sqlite3*,i16,int,char**);
-SQLITE_PRIVATE void sqlite3CreateIndex(Parse*,Token*,Token*,SrcList*,ExprList*,int,Token*,
- Expr*, int, int, u8);
-SQLITE_PRIVATE void sqlite3DropIndex(Parse*, SrcList*, int);
-SQLITE_PRIVATE int sqlite3Select(Parse*, Select*, SelectDest*);
-SQLITE_PRIVATE Select *sqlite3SelectNew(Parse*,ExprList*,SrcList*,Expr*,ExprList*,
- Expr*,ExprList*,u32,Expr*,Expr*);
-SQLITE_PRIVATE void sqlite3SelectDelete(sqlite3*, Select*);
-SQLITE_PRIVATE Table *sqlite3SrcListLookup(Parse*, SrcList*);
-SQLITE_PRIVATE int sqlite3IsReadOnly(Parse*, Table*, int);
-SQLITE_PRIVATE void sqlite3OpenTable(Parse*, int iCur, int iDb, Table*, int);
-#if defined(SQLITE_ENABLE_UPDATE_DELETE_LIMIT) && !defined(SQLITE_OMIT_SUBQUERY)
-SQLITE_PRIVATE Expr *sqlite3LimitWhere(Parse*,SrcList*,Expr*,ExprList*,Expr*,Expr*,char*);
-#endif
-SQLITE_PRIVATE void sqlite3DeleteFrom(Parse*, SrcList*, Expr*);
-SQLITE_PRIVATE void sqlite3Update(Parse*, SrcList*, ExprList*, Expr*, int);
-SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin(Parse*,SrcList*,Expr*,ExprList*,ExprList*,u16,int);
-SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo*);
-SQLITE_PRIVATE LogEst sqlite3WhereOutputRowCount(WhereInfo*);
-SQLITE_PRIVATE int sqlite3WhereIsDistinct(WhereInfo*);
-SQLITE_PRIVATE int sqlite3WhereIsOrdered(WhereInfo*);
-SQLITE_PRIVATE int sqlite3WhereOrderedInnerLoop(WhereInfo*);
-SQLITE_PRIVATE int sqlite3WhereIsSorted(WhereInfo*);
-SQLITE_PRIVATE int sqlite3WhereContinueLabel(WhereInfo*);
-SQLITE_PRIVATE int sqlite3WhereBreakLabel(WhereInfo*);
-SQLITE_PRIVATE int sqlite3WhereOkOnePass(WhereInfo*, int*);
-#define ONEPASS_OFF 0 /* Use of ONEPASS not allowed */
-#define ONEPASS_SINGLE 1 /* ONEPASS valid for a single row update */
-#define ONEPASS_MULTI 2 /* ONEPASS is valid for multiple rows */
-SQLITE_PRIVATE void sqlite3ExprCodeLoadIndexColumn(Parse*, Index*, int, int, int);
-SQLITE_PRIVATE int sqlite3ExprCodeGetColumn(Parse*, Table*, int, int, int, u8);
-SQLITE_PRIVATE void sqlite3ExprCodeGetColumnToReg(Parse*, Table*, int, int, int);
-SQLITE_PRIVATE void sqlite3ExprCodeGetColumnOfTable(Vdbe*, Table*, int, int, int);
-SQLITE_PRIVATE void sqlite3ExprCodeMove(Parse*, int, int, int);
-SQLITE_PRIVATE void sqlite3ExprCacheStore(Parse*, int, int, int);
-SQLITE_PRIVATE void sqlite3ExprCachePush(Parse*);
-SQLITE_PRIVATE void sqlite3ExprCachePop(Parse*);
-SQLITE_PRIVATE void sqlite3ExprCacheRemove(Parse*, int, int);
-SQLITE_PRIVATE void sqlite3ExprCacheClear(Parse*);
-SQLITE_PRIVATE void sqlite3ExprCacheAffinityChange(Parse*, int, int);
-SQLITE_PRIVATE void sqlite3ExprCode(Parse*, Expr*, int);
-SQLITE_PRIVATE void sqlite3ExprCodeCopy(Parse*, Expr*, int);
-SQLITE_PRIVATE void sqlite3ExprCodeFactorable(Parse*, Expr*, int);
-SQLITE_PRIVATE void sqlite3ExprCodeAtInit(Parse*, Expr*, int, u8);
-SQLITE_PRIVATE int sqlite3ExprCodeTemp(Parse*, Expr*, int*);
-SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse*, Expr*, int);
-SQLITE_PRIVATE void sqlite3ExprCodeAndCache(Parse*, Expr*, int);
-SQLITE_PRIVATE int sqlite3ExprCodeExprList(Parse*, ExprList*, int, int, u8);
-#define SQLITE_ECEL_DUP 0x01 /* Deep, not shallow copies */
-#define SQLITE_ECEL_FACTOR 0x02 /* Factor out constant terms */
-#define SQLITE_ECEL_REF 0x04 /* Use ExprList.u.x.iOrderByCol */
-SQLITE_PRIVATE void sqlite3ExprIfTrue(Parse*, Expr*, int, int);
-SQLITE_PRIVATE void sqlite3ExprIfFalse(Parse*, Expr*, int, int);
-SQLITE_PRIVATE void sqlite3ExprIfFalseDup(Parse*, Expr*, int, int);
-SQLITE_PRIVATE Table *sqlite3FindTable(sqlite3*,const char*, const char*);
-#define LOCATE_VIEW 0x01
-#define LOCATE_NOERR 0x02
-SQLITE_PRIVATE Table *sqlite3LocateTable(Parse*,u32 flags,const char*, const char*);
-SQLITE_PRIVATE Table *sqlite3LocateTableItem(Parse*,u32 flags,struct SrcList_item *);
-SQLITE_PRIVATE Index *sqlite3FindIndex(sqlite3*,const char*, const char*);
-SQLITE_PRIVATE void sqlite3UnlinkAndDeleteTable(sqlite3*,int,const char*);
-SQLITE_PRIVATE void sqlite3UnlinkAndDeleteIndex(sqlite3*,int,const char*);
-SQLITE_PRIVATE void sqlite3Vacuum(Parse*);
-SQLITE_PRIVATE int sqlite3RunVacuum(char**, sqlite3*);
-SQLITE_PRIVATE char *sqlite3NameFromToken(sqlite3*, Token*);
-SQLITE_PRIVATE int sqlite3ExprCompare(Expr*, Expr*, int);
-SQLITE_PRIVATE int sqlite3ExprListCompare(ExprList*, ExprList*, int);
-SQLITE_PRIVATE int sqlite3ExprImpliesExpr(Expr*, Expr*, int);
-SQLITE_PRIVATE void sqlite3ExprAnalyzeAggregates(NameContext*, Expr*);
-SQLITE_PRIVATE void sqlite3ExprAnalyzeAggList(NameContext*,ExprList*);
-SQLITE_PRIVATE int sqlite3ExprCoveredByIndex(Expr*, int iCur, Index *pIdx);
-SQLITE_PRIVATE int sqlite3FunctionUsesThisSrc(Expr*, SrcList*);
-SQLITE_PRIVATE Vdbe *sqlite3GetVdbe(Parse*);
-#ifndef SQLITE_OMIT_BUILTIN_TEST
-SQLITE_PRIVATE void sqlite3PrngSaveState(void);
-SQLITE_PRIVATE void sqlite3PrngRestoreState(void);
-#endif
-SQLITE_PRIVATE void sqlite3RollbackAll(sqlite3*,int);
-SQLITE_PRIVATE void sqlite3CodeVerifySchema(Parse*, int);
-SQLITE_PRIVATE void sqlite3CodeVerifyNamedSchema(Parse*, const char *zDb);
-SQLITE_PRIVATE void sqlite3BeginTransaction(Parse*, int);
-SQLITE_PRIVATE void sqlite3CommitTransaction(Parse*);
-SQLITE_PRIVATE void sqlite3RollbackTransaction(Parse*);
-SQLITE_PRIVATE void sqlite3Savepoint(Parse*, int, Token*);
-SQLITE_PRIVATE void sqlite3CloseSavepoints(sqlite3 *);
-SQLITE_PRIVATE void sqlite3LeaveMutexAndCloseZombie(sqlite3*);
-SQLITE_PRIVATE int sqlite3ExprIsConstant(Expr*);
-SQLITE_PRIVATE int sqlite3ExprIsConstantNotJoin(Expr*);
-SQLITE_PRIVATE int sqlite3ExprIsConstantOrFunction(Expr*, u8);
-SQLITE_PRIVATE int sqlite3ExprIsTableConstant(Expr*,int);
-#ifdef SQLITE_ENABLE_CURSOR_HINTS
-SQLITE_PRIVATE int sqlite3ExprContainsSubquery(Expr*);
-#endif
-SQLITE_PRIVATE int sqlite3ExprIsInteger(Expr*, int*);
-SQLITE_PRIVATE int sqlite3ExprCanBeNull(const Expr*);
-SQLITE_PRIVATE int sqlite3ExprNeedsNoAffinityChange(const Expr*, char);
-SQLITE_PRIVATE int sqlite3IsRowid(const char*);
-SQLITE_PRIVATE void sqlite3GenerateRowDelete(
- Parse*,Table*,Trigger*,int,int,int,i16,u8,u8,u8,int);
-SQLITE_PRIVATE void sqlite3GenerateRowIndexDelete(Parse*, Table*, int, int, int*, int);
-SQLITE_PRIVATE int sqlite3GenerateIndexKey(Parse*, Index*, int, int, int, int*,Index*,int);
-SQLITE_PRIVATE void sqlite3ResolvePartIdxLabel(Parse*,int);
-SQLITE_PRIVATE void sqlite3GenerateConstraintChecks(Parse*,Table*,int*,int,int,int,int,
- u8,u8,int,int*,int*);
-SQLITE_PRIVATE void sqlite3CompleteInsertion(Parse*,Table*,int,int,int,int*,int,int,int);
-SQLITE_PRIVATE int sqlite3OpenTableAndIndices(Parse*, Table*, int, u8, int, u8*, int*, int*);
-SQLITE_PRIVATE void sqlite3BeginWriteOperation(Parse*, int, int);
-SQLITE_PRIVATE void sqlite3MultiWrite(Parse*);
-SQLITE_PRIVATE void sqlite3MayAbort(Parse*);
-SQLITE_PRIVATE void sqlite3HaltConstraint(Parse*, int, int, char*, i8, u8);
-SQLITE_PRIVATE void sqlite3UniqueConstraint(Parse*, int, Index*);
-SQLITE_PRIVATE void sqlite3RowidConstraint(Parse*, int, Table*);
-SQLITE_PRIVATE Expr *sqlite3ExprDup(sqlite3*,Expr*,int);
-SQLITE_PRIVATE ExprList *sqlite3ExprListDup(sqlite3*,ExprList*,int);
-SQLITE_PRIVATE SrcList *sqlite3SrcListDup(sqlite3*,SrcList*,int);
-SQLITE_PRIVATE IdList *sqlite3IdListDup(sqlite3*,IdList*);
-SQLITE_PRIVATE Select *sqlite3SelectDup(sqlite3*,Select*,int);
-#if SELECTTRACE_ENABLED
-SQLITE_PRIVATE void sqlite3SelectSetName(Select*,const char*);
-#else
-# define sqlite3SelectSetName(A,B)
-#endif
-SQLITE_PRIVATE void sqlite3InsertBuiltinFuncs(FuncDef*,int);
-SQLITE_PRIVATE FuncDef *sqlite3FindFunction(sqlite3*,const char*,int,u8,u8);
-SQLITE_PRIVATE void sqlite3RegisterBuiltinFunctions(void);
-SQLITE_PRIVATE void sqlite3RegisterDateTimeFunctions(void);
-SQLITE_PRIVATE void sqlite3RegisterPerConnectionBuiltinFunctions(sqlite3*);
-SQLITE_PRIVATE int sqlite3SafetyCheckOk(sqlite3*);
-SQLITE_PRIVATE int sqlite3SafetyCheckSickOrOk(sqlite3*);
-SQLITE_PRIVATE void sqlite3ChangeCookie(Parse*, int);
-
-#if !defined(SQLITE_OMIT_VIEW) && !defined(SQLITE_OMIT_TRIGGER)
-SQLITE_PRIVATE void sqlite3MaterializeView(Parse*, Table*, Expr*, int);
-#endif
-
-#ifndef SQLITE_OMIT_TRIGGER
-SQLITE_PRIVATE void sqlite3BeginTrigger(Parse*, Token*,Token*,int,int,IdList*,SrcList*,
- Expr*,int, int);
-SQLITE_PRIVATE void sqlite3FinishTrigger(Parse*, TriggerStep*, Token*);
-SQLITE_PRIVATE void sqlite3DropTrigger(Parse*, SrcList*, int);
-SQLITE_PRIVATE void sqlite3DropTriggerPtr(Parse*, Trigger*);
-SQLITE_PRIVATE Trigger *sqlite3TriggersExist(Parse *, Table*, int, ExprList*, int *pMask);
-SQLITE_PRIVATE Trigger *sqlite3TriggerList(Parse *, Table *);
-SQLITE_PRIVATE void sqlite3CodeRowTrigger(Parse*, Trigger *, int, ExprList*, int, Table *,
- int, int, int);
-SQLITE_PRIVATE void sqlite3CodeRowTriggerDirect(Parse *, Trigger *, Table *, int, int, int);
- void sqliteViewTriggers(Parse*, Table*, Expr*, int, ExprList*);
-SQLITE_PRIVATE void sqlite3DeleteTriggerStep(sqlite3*, TriggerStep*);
-SQLITE_PRIVATE TriggerStep *sqlite3TriggerSelectStep(sqlite3*,Select*);
-SQLITE_PRIVATE TriggerStep *sqlite3TriggerInsertStep(sqlite3*,Token*, IdList*,
- Select*,u8);
-SQLITE_PRIVATE TriggerStep *sqlite3TriggerUpdateStep(sqlite3*,Token*,ExprList*, Expr*, u8);
-SQLITE_PRIVATE TriggerStep *sqlite3TriggerDeleteStep(sqlite3*,Token*, Expr*);
-SQLITE_PRIVATE void sqlite3DeleteTrigger(sqlite3*, Trigger*);
-SQLITE_PRIVATE void sqlite3UnlinkAndDeleteTrigger(sqlite3*,int,const char*);
-SQLITE_PRIVATE u32 sqlite3TriggerColmask(Parse*,Trigger*,ExprList*,int,int,Table*,int);
-# define sqlite3ParseToplevel(p) ((p)->pToplevel ? (p)->pToplevel : (p))
-# define sqlite3IsToplevel(p) ((p)->pToplevel==0)
-#else
-# define sqlite3TriggersExist(B,C,D,E,F) 0
-# define sqlite3DeleteTrigger(A,B)
-# define sqlite3DropTriggerPtr(A,B)
-# define sqlite3UnlinkAndDeleteTrigger(A,B,C)
-# define sqlite3CodeRowTrigger(A,B,C,D,E,F,G,H,I)
-# define sqlite3CodeRowTriggerDirect(A,B,C,D,E,F)
-# define sqlite3TriggerList(X, Y) 0
-# define sqlite3ParseToplevel(p) p
-# define sqlite3IsToplevel(p) 1
-# define sqlite3TriggerColmask(A,B,C,D,E,F,G) 0
-#endif
-
-SQLITE_PRIVATE int sqlite3JoinType(Parse*, Token*, Token*, Token*);
-SQLITE_PRIVATE void sqlite3CreateForeignKey(Parse*, ExprList*, Token*, ExprList*, int);
-SQLITE_PRIVATE void sqlite3DeferForeignKey(Parse*, int);
-#ifndef SQLITE_OMIT_AUTHORIZATION
-SQLITE_PRIVATE void sqlite3AuthRead(Parse*,Expr*,Schema*,SrcList*);
-SQLITE_PRIVATE int sqlite3AuthCheck(Parse*,int, const char*, const char*, const char*);
-SQLITE_PRIVATE void sqlite3AuthContextPush(Parse*, AuthContext*, const char*);
-SQLITE_PRIVATE void sqlite3AuthContextPop(AuthContext*);
-SQLITE_PRIVATE int sqlite3AuthReadCol(Parse*, const char *, const char *, int);
-#else
-# define sqlite3AuthRead(a,b,c,d)
-# define sqlite3AuthCheck(a,b,c,d,e) SQLITE_OK
-# define sqlite3AuthContextPush(a,b,c)
-# define sqlite3AuthContextPop(a) ((void)(a))
-#endif
-SQLITE_PRIVATE void sqlite3Attach(Parse*, Expr*, Expr*, Expr*);
-SQLITE_PRIVATE void sqlite3Detach(Parse*, Expr*);
-SQLITE_PRIVATE void sqlite3FixInit(DbFixer*, Parse*, int, const char*, const Token*);
-SQLITE_PRIVATE int sqlite3FixSrcList(DbFixer*, SrcList*);
-SQLITE_PRIVATE int sqlite3FixSelect(DbFixer*, Select*);
-SQLITE_PRIVATE int sqlite3FixExpr(DbFixer*, Expr*);
-SQLITE_PRIVATE int sqlite3FixExprList(DbFixer*, ExprList*);
-SQLITE_PRIVATE int sqlite3FixTriggerStep(DbFixer*, TriggerStep*);
-SQLITE_PRIVATE int sqlite3AtoF(const char *z, double*, int, u8);
-SQLITE_PRIVATE int sqlite3GetInt32(const char *, int*);
-SQLITE_PRIVATE int sqlite3Atoi(const char*);
-SQLITE_PRIVATE int sqlite3Utf16ByteLen(const void *pData, int nChar);
-SQLITE_PRIVATE int sqlite3Utf8CharLen(const char *pData, int nByte);
-SQLITE_PRIVATE u32 sqlite3Utf8Read(const u8**);
-SQLITE_PRIVATE LogEst sqlite3LogEst(u64);
-SQLITE_PRIVATE LogEst sqlite3LogEstAdd(LogEst,LogEst);
-#ifndef SQLITE_OMIT_VIRTUALTABLE
-SQLITE_PRIVATE LogEst sqlite3LogEstFromDouble(double);
-#endif
-#if defined(SQLITE_ENABLE_STMT_SCANSTATUS) || \
- defined(SQLITE_ENABLE_STAT3_OR_STAT4) || \
- defined(SQLITE_EXPLAIN_ESTIMATED_ROWS)
-SQLITE_PRIVATE u64 sqlite3LogEstToInt(LogEst);
-#endif
-
-/*
-** Routines to read and write variable-length integers. These used to
-** be defined locally, but now we use the varint routines in the util.c
-** file.
-*/
-SQLITE_PRIVATE int sqlite3PutVarint(unsigned char*, u64);
-SQLITE_PRIVATE u8 sqlite3GetVarint(const unsigned char *, u64 *);
-SQLITE_PRIVATE u8 sqlite3GetVarint32(const unsigned char *, u32 *);
-SQLITE_PRIVATE int sqlite3VarintLen(u64 v);
-
-/*
-** The common case is for a varint to be a single byte. They following
-** macros handle the common case without a procedure call, but then call
-** the procedure for larger varints.
-*/
-#define getVarint32(A,B) \
- (u8)((*(A)<(u8)0x80)?((B)=(u32)*(A)),1:sqlite3GetVarint32((A),(u32 *)&(B)))
-#define putVarint32(A,B) \
- (u8)(((u32)(B)<(u32)0x80)?(*(A)=(unsigned char)(B)),1:\
- sqlite3PutVarint((A),(B)))
-#define getVarint sqlite3GetVarint
-#define putVarint sqlite3PutVarint
-
-
-SQLITE_PRIVATE const char *sqlite3IndexAffinityStr(sqlite3*, Index*);
-SQLITE_PRIVATE void sqlite3TableAffinity(Vdbe*, Table*, int);
-SQLITE_PRIVATE char sqlite3CompareAffinity(Expr *pExpr, char aff2);
-SQLITE_PRIVATE int sqlite3IndexAffinityOk(Expr *pExpr, char idx_affinity);
-SQLITE_PRIVATE char sqlite3ExprAffinity(Expr *pExpr);
-SQLITE_PRIVATE int sqlite3Atoi64(const char*, i64*, int, u8);
-SQLITE_PRIVATE int sqlite3DecOrHexToI64(const char*, i64*);
-SQLITE_PRIVATE void sqlite3ErrorWithMsg(sqlite3*, int, const char*,...);
-SQLITE_PRIVATE void sqlite3Error(sqlite3*,int);
-SQLITE_PRIVATE void sqlite3SystemError(sqlite3*,int);
-SQLITE_PRIVATE void *sqlite3HexToBlob(sqlite3*, const char *z, int n);
-SQLITE_PRIVATE u8 sqlite3HexToInt(int h);
-SQLITE_PRIVATE int sqlite3TwoPartName(Parse *, Token *, Token *, Token **);
-
-#if defined(SQLITE_NEED_ERR_NAME)
-SQLITE_PRIVATE const char *sqlite3ErrName(int);
-#endif
-
-SQLITE_PRIVATE const char *sqlite3ErrStr(int);
-SQLITE_PRIVATE int sqlite3ReadSchema(Parse *pParse);
-SQLITE_PRIVATE CollSeq *sqlite3FindCollSeq(sqlite3*,u8 enc, const char*,int);
-SQLITE_PRIVATE CollSeq *sqlite3LocateCollSeq(Parse *pParse, const char*zName);
-SQLITE_PRIVATE CollSeq *sqlite3ExprCollSeq(Parse *pParse, Expr *pExpr);
-SQLITE_PRIVATE Expr *sqlite3ExprAddCollateToken(Parse *pParse, Expr*, const Token*, int);
-SQLITE_PRIVATE Expr *sqlite3ExprAddCollateString(Parse*,Expr*,const char*);
-SQLITE_PRIVATE Expr *sqlite3ExprSkipCollate(Expr*);
-SQLITE_PRIVATE int sqlite3CheckCollSeq(Parse *, CollSeq *);
-SQLITE_PRIVATE int sqlite3CheckObjectName(Parse *, const char *);
-SQLITE_PRIVATE void sqlite3VdbeSetChanges(sqlite3 *, int);
-SQLITE_PRIVATE int sqlite3AddInt64(i64*,i64);
-SQLITE_PRIVATE int sqlite3SubInt64(i64*,i64);
-SQLITE_PRIVATE int sqlite3MulInt64(i64*,i64);
-SQLITE_PRIVATE int sqlite3AbsInt32(int);
-#ifdef SQLITE_ENABLE_8_3_NAMES
-SQLITE_PRIVATE void sqlite3FileSuffix3(const char*, char*);
-#else
-# define sqlite3FileSuffix3(X,Y)
-#endif
-SQLITE_PRIVATE u8 sqlite3GetBoolean(const char *z,u8);
-
-SQLITE_PRIVATE const void *sqlite3ValueText(sqlite3_value*, u8);
-SQLITE_PRIVATE int sqlite3ValueBytes(sqlite3_value*, u8);
-SQLITE_PRIVATE void sqlite3ValueSetStr(sqlite3_value*, int, const void *,u8,
- void(*)(void*));
-SQLITE_PRIVATE void sqlite3ValueSetNull(sqlite3_value*);
-SQLITE_PRIVATE void sqlite3ValueFree(sqlite3_value*);
-SQLITE_PRIVATE sqlite3_value *sqlite3ValueNew(sqlite3 *);
-SQLITE_PRIVATE char *sqlite3Utf16to8(sqlite3 *, const void*, int, u8);
-SQLITE_PRIVATE int sqlite3ValueFromExpr(sqlite3 *, Expr *, u8, u8, sqlite3_value **);
-SQLITE_PRIVATE void sqlite3ValueApplyAffinity(sqlite3_value *, u8, u8);
-#ifndef SQLITE_AMALGAMATION
-SQLITE_PRIVATE const unsigned char sqlite3OpcodeProperty[];
-SQLITE_PRIVATE const char sqlite3StrBINARY[];
-SQLITE_PRIVATE const unsigned char sqlite3UpperToLower[];
-SQLITE_PRIVATE const unsigned char sqlite3CtypeMap[];
-SQLITE_PRIVATE const Token sqlite3IntTokens[];
-SQLITE_PRIVATE SQLITE_WSD struct Sqlite3Config sqlite3Config;
-SQLITE_PRIVATE FuncDefHash sqlite3BuiltinFunctions;
-#ifndef SQLITE_OMIT_WSD
-SQLITE_PRIVATE int sqlite3PendingByte;
-#endif
-#endif
-SQLITE_PRIVATE void sqlite3RootPageMoved(sqlite3*, int, int, int);
-SQLITE_PRIVATE void sqlite3Reindex(Parse*, Token*, Token*);
-SQLITE_PRIVATE void sqlite3AlterFunctions(void);
-SQLITE_PRIVATE void sqlite3AlterRenameTable(Parse*, SrcList*, Token*);
-SQLITE_PRIVATE int sqlite3GetToken(const unsigned char *, int *);
-SQLITE_PRIVATE void sqlite3NestedParse(Parse*, const char*, ...);
-SQLITE_PRIVATE void sqlite3ExpirePreparedStatements(sqlite3*);
-SQLITE_PRIVATE int sqlite3CodeSubselect(Parse *, Expr *, int, int);
-SQLITE_PRIVATE void sqlite3SelectPrep(Parse*, Select*, NameContext*);
-SQLITE_PRIVATE void sqlite3SelectWrongNumTermsError(Parse *pParse, Select *p);
-SQLITE_PRIVATE int sqlite3MatchSpanName(const char*, const char*, const char*, const char*);
-SQLITE_PRIVATE int sqlite3ResolveExprNames(NameContext*, Expr*);
-SQLITE_PRIVATE int sqlite3ResolveExprListNames(NameContext*, ExprList*);
-SQLITE_PRIVATE void sqlite3ResolveSelectNames(Parse*, Select*, NameContext*);
-SQLITE_PRIVATE void sqlite3ResolveSelfReference(Parse*,Table*,int,Expr*,ExprList*);
-SQLITE_PRIVATE int sqlite3ResolveOrderGroupBy(Parse*, Select*, ExprList*, const char*);
-SQLITE_PRIVATE void sqlite3ColumnDefault(Vdbe *, Table *, int, int);
-SQLITE_PRIVATE void sqlite3AlterFinishAddColumn(Parse *, Token *);
-SQLITE_PRIVATE void sqlite3AlterBeginAddColumn(Parse *, SrcList *);
-SQLITE_PRIVATE CollSeq *sqlite3GetCollSeq(Parse*, u8, CollSeq *, const char*);
-SQLITE_PRIVATE char sqlite3AffinityType(const char*, u8*);
-SQLITE_PRIVATE void sqlite3Analyze(Parse*, Token*, Token*);
-SQLITE_PRIVATE int sqlite3InvokeBusyHandler(BusyHandler*);
-SQLITE_PRIVATE int sqlite3FindDb(sqlite3*, Token*);
-SQLITE_PRIVATE int sqlite3FindDbName(sqlite3 *, const char *);
-SQLITE_PRIVATE int sqlite3AnalysisLoad(sqlite3*,int iDB);
-SQLITE_PRIVATE void sqlite3DeleteIndexSamples(sqlite3*,Index*);
-SQLITE_PRIVATE void sqlite3DefaultRowEst(Index*);
-SQLITE_PRIVATE void sqlite3RegisterLikeFunctions(sqlite3*, int);
-SQLITE_PRIVATE int sqlite3IsLikeFunction(sqlite3*,Expr*,int*,char*);
-SQLITE_PRIVATE void sqlite3SchemaClear(void *);
-SQLITE_PRIVATE Schema *sqlite3SchemaGet(sqlite3 *, Btree *);
-SQLITE_PRIVATE int sqlite3SchemaToIndex(sqlite3 *db, Schema *);
-SQLITE_PRIVATE KeyInfo *sqlite3KeyInfoAlloc(sqlite3*,int,int);
-SQLITE_PRIVATE void sqlite3KeyInfoUnref(KeyInfo*);
-SQLITE_PRIVATE KeyInfo *sqlite3KeyInfoRef(KeyInfo*);
-SQLITE_PRIVATE KeyInfo *sqlite3KeyInfoOfIndex(Parse*, Index*);
-#ifdef SQLITE_DEBUG
-SQLITE_PRIVATE int sqlite3KeyInfoIsWriteable(KeyInfo*);
-#endif
-SQLITE_PRIVATE int sqlite3CreateFunc(sqlite3 *, const char *, int, int, void *,
- void (*)(sqlite3_context*,int,sqlite3_value **),
- void (*)(sqlite3_context*,int,sqlite3_value **), void (*)(sqlite3_context*),
- FuncDestructor *pDestructor
-);
-SQLITE_PRIVATE void sqlite3OomFault(sqlite3*);
-SQLITE_PRIVATE void sqlite3OomClear(sqlite3*);
-SQLITE_PRIVATE int sqlite3ApiExit(sqlite3 *db, int);
-SQLITE_PRIVATE int sqlite3OpenTempDatabase(Parse *);
-
-SQLITE_PRIVATE void sqlite3StrAccumInit(StrAccum*, sqlite3*, char*, int, int);
-SQLITE_PRIVATE void sqlite3StrAccumAppend(StrAccum*,const char*,int);
-SQLITE_PRIVATE void sqlite3StrAccumAppendAll(StrAccum*,const char*);
-SQLITE_PRIVATE void sqlite3AppendChar(StrAccum*,int,char);
-SQLITE_PRIVATE char *sqlite3StrAccumFinish(StrAccum*);
-SQLITE_PRIVATE void sqlite3StrAccumReset(StrAccum*);
-SQLITE_PRIVATE void sqlite3SelectDestInit(SelectDest*,int,int);
-SQLITE_PRIVATE Expr *sqlite3CreateColumnExpr(sqlite3 *, SrcList *, int, int);
-
-SQLITE_PRIVATE void sqlite3BackupRestart(sqlite3_backup *);
-SQLITE_PRIVATE void sqlite3BackupUpdate(sqlite3_backup *, Pgno, const u8 *);
-
-#ifdef SQLITE_ENABLE_STAT3_OR_STAT4
-SQLITE_PRIVATE void sqlite3AnalyzeFunctions(void);
-SQLITE_PRIVATE int sqlite3Stat4ProbeSetValue(Parse*,Index*,UnpackedRecord**,Expr*,u8,int,int*);
-SQLITE_PRIVATE int sqlite3Stat4ValueFromExpr(Parse*, Expr*, u8, sqlite3_value**);
-SQLITE_PRIVATE void sqlite3Stat4ProbeFree(UnpackedRecord*);
-SQLITE_PRIVATE int sqlite3Stat4Column(sqlite3*, const void*, int, int, sqlite3_value**);
-#endif
-
-/*
-** The interface to the LEMON-generated parser
-*/
-SQLITE_PRIVATE void *sqlite3ParserAlloc(void*(*)(u64));
-SQLITE_PRIVATE void sqlite3ParserFree(void*, void(*)(void*));
-SQLITE_PRIVATE void sqlite3Parser(void*, int, Token, Parse*);
-#ifdef YYTRACKMAXSTACKDEPTH
-SQLITE_PRIVATE int sqlite3ParserStackPeak(void*);
-#endif
-
-SQLITE_PRIVATE void sqlite3AutoLoadExtensions(sqlite3*);
-#ifndef SQLITE_OMIT_LOAD_EXTENSION
-SQLITE_PRIVATE void sqlite3CloseExtensions(sqlite3*);
-#else
-# define sqlite3CloseExtensions(X)
-#endif
-
-#ifndef SQLITE_OMIT_SHARED_CACHE
-SQLITE_PRIVATE void sqlite3TableLock(Parse *, int, int, u8, const char *);
-#else
- #define sqlite3TableLock(v,w,x,y,z)
-#endif
-
-#ifdef SQLITE_TEST
-SQLITE_PRIVATE int sqlite3Utf8To8(unsigned char*);
-#endif
-
-#ifdef SQLITE_OMIT_VIRTUALTABLE
-# define sqlite3VtabClear(Y)
-# define sqlite3VtabSync(X,Y) SQLITE_OK
-# define sqlite3VtabRollback(X)
-# define sqlite3VtabCommit(X)
-# define sqlite3VtabInSync(db) 0
-# define sqlite3VtabLock(X)
-# define sqlite3VtabUnlock(X)
-# define sqlite3VtabUnlockList(X)
-# define sqlite3VtabSavepoint(X, Y, Z) SQLITE_OK
-# define sqlite3GetVTable(X,Y) ((VTable*)0)
-#else
-SQLITE_PRIVATE void sqlite3VtabClear(sqlite3 *db, Table*);
-SQLITE_PRIVATE void sqlite3VtabDisconnect(sqlite3 *db, Table *p);
-SQLITE_PRIVATE int sqlite3VtabSync(sqlite3 *db, Vdbe*);
-SQLITE_PRIVATE int sqlite3VtabRollback(sqlite3 *db);
-SQLITE_PRIVATE int sqlite3VtabCommit(sqlite3 *db);
-SQLITE_PRIVATE void sqlite3VtabLock(VTable *);
-SQLITE_PRIVATE void sqlite3VtabUnlock(VTable *);
-SQLITE_PRIVATE void sqlite3VtabUnlockList(sqlite3*);
-SQLITE_PRIVATE int sqlite3VtabSavepoint(sqlite3 *, int, int);
-SQLITE_PRIVATE void sqlite3VtabImportErrmsg(Vdbe*, sqlite3_vtab*);
-SQLITE_PRIVATE VTable *sqlite3GetVTable(sqlite3*, Table*);
-# define sqlite3VtabInSync(db) ((db)->nVTrans>0 && (db)->aVTrans==0)
-#endif
-SQLITE_PRIVATE int sqlite3VtabEponymousTableInit(Parse*,Module*);
-SQLITE_PRIVATE void sqlite3VtabEponymousTableClear(sqlite3*,Module*);
-SQLITE_PRIVATE void sqlite3VtabMakeWritable(Parse*,Table*);
-SQLITE_PRIVATE void sqlite3VtabBeginParse(Parse*, Token*, Token*, Token*, int);
-SQLITE_PRIVATE void sqlite3VtabFinishParse(Parse*, Token*);
-SQLITE_PRIVATE void sqlite3VtabArgInit(Parse*);
-SQLITE_PRIVATE void sqlite3VtabArgExtend(Parse*, Token*);
-SQLITE_PRIVATE int sqlite3VtabCallCreate(sqlite3*, int, const char *, char **);
-SQLITE_PRIVATE int sqlite3VtabCallConnect(Parse*, Table*);
-SQLITE_PRIVATE int sqlite3VtabCallDestroy(sqlite3*, int, const char *);
-SQLITE_PRIVATE int sqlite3VtabBegin(sqlite3 *, VTable *);
-SQLITE_PRIVATE FuncDef *sqlite3VtabOverloadFunction(sqlite3 *,FuncDef*, int nArg, Expr*);
-SQLITE_PRIVATE void sqlite3InvalidFunction(sqlite3_context*,int,sqlite3_value**);
-SQLITE_PRIVATE sqlite3_int64 sqlite3StmtCurrentTime(sqlite3_context*);
-SQLITE_PRIVATE int sqlite3VdbeParameterIndex(Vdbe*, const char*, int);
-SQLITE_PRIVATE int sqlite3TransferBindings(sqlite3_stmt *, sqlite3_stmt *);
-SQLITE_PRIVATE void sqlite3ParserReset(Parse*);
-SQLITE_PRIVATE int sqlite3Reprepare(Vdbe*);
-SQLITE_PRIVATE void sqlite3ExprListCheckLength(Parse*, ExprList*, const char*);
-SQLITE_PRIVATE CollSeq *sqlite3BinaryCompareCollSeq(Parse *, Expr *, Expr *);
-SQLITE_PRIVATE int sqlite3TempInMemory(const sqlite3*);
-SQLITE_PRIVATE const char *sqlite3JournalModename(int);
-#ifndef SQLITE_OMIT_WAL
-SQLITE_PRIVATE int sqlite3Checkpoint(sqlite3*, int, int, int*, int*);
-SQLITE_PRIVATE int sqlite3WalDefaultHook(void*,sqlite3*,const char*,int);
-#endif
-#ifndef SQLITE_OMIT_CTE
-SQLITE_PRIVATE With *sqlite3WithAdd(Parse*,With*,Token*,ExprList*,Select*);
-SQLITE_PRIVATE void sqlite3WithDelete(sqlite3*,With*);
-SQLITE_PRIVATE void sqlite3WithPush(Parse*, With*, u8);
-#else
-#define sqlite3WithPush(x,y,z)
-#define sqlite3WithDelete(x,y)
-#endif
-
-/* Declarations for functions in fkey.c. All of these are replaced by
-** no-op macros if OMIT_FOREIGN_KEY is defined. In this case no foreign
-** key functionality is available. If OMIT_TRIGGER is defined but
-** OMIT_FOREIGN_KEY is not, only some of the functions are no-oped. In
-** this case foreign keys are parsed, but no other functionality is
-** provided (enforcement of FK constraints requires the triggers sub-system).
-*/
-#if !defined(SQLITE_OMIT_FOREIGN_KEY) && !defined(SQLITE_OMIT_TRIGGER)
-SQLITE_PRIVATE void sqlite3FkCheck(Parse*, Table*, int, int, int*, int);
-SQLITE_PRIVATE void sqlite3FkDropTable(Parse*, SrcList *, Table*);
-SQLITE_PRIVATE void sqlite3FkActions(Parse*, Table*, ExprList*, int, int*, int);
-SQLITE_PRIVATE int sqlite3FkRequired(Parse*, Table*, int*, int);
-SQLITE_PRIVATE u32 sqlite3FkOldmask(Parse*, Table*);
-SQLITE_PRIVATE FKey *sqlite3FkReferences(Table *);
-#else
- #define sqlite3FkActions(a,b,c,d,e,f)
- #define sqlite3FkCheck(a,b,c,d,e,f)
- #define sqlite3FkDropTable(a,b,c)
- #define sqlite3FkOldmask(a,b) 0
- #define sqlite3FkRequired(a,b,c,d) 0
-#endif
-#ifndef SQLITE_OMIT_FOREIGN_KEY
-SQLITE_PRIVATE void sqlite3FkDelete(sqlite3 *, Table*);
-SQLITE_PRIVATE int sqlite3FkLocateIndex(Parse*,Table*,FKey*,Index**,int**);
-#else
- #define sqlite3FkDelete(a,b)
- #define sqlite3FkLocateIndex(a,b,c,d,e)
-#endif
-
-
-/*
-** Available fault injectors. Should be numbered beginning with 0.
-*/
-#define SQLITE_FAULTINJECTOR_MALLOC 0
-#define SQLITE_FAULTINJECTOR_COUNT 1
-
-/*
-** The interface to the code in fault.c used for identifying "benign"
-** malloc failures. This is only present if SQLITE_OMIT_BUILTIN_TEST
-** is not defined.
-*/
-#ifndef SQLITE_OMIT_BUILTIN_TEST
-SQLITE_PRIVATE void sqlite3BeginBenignMalloc(void);
-SQLITE_PRIVATE void sqlite3EndBenignMalloc(void);
-#else
- #define sqlite3BeginBenignMalloc()
- #define sqlite3EndBenignMalloc()
-#endif
-
-/*
-** Allowed return values from sqlite3FindInIndex()
-*/
-#define IN_INDEX_ROWID 1 /* Search the rowid of the table */
-#define IN_INDEX_EPH 2 /* Search an ephemeral b-tree */
-#define IN_INDEX_INDEX_ASC 3 /* Existing index ASCENDING */
-#define IN_INDEX_INDEX_DESC 4 /* Existing index DESCENDING */
-#define IN_INDEX_NOOP 5 /* No table available. Use comparisons */
-/*
-** Allowed flags for the 3rd parameter to sqlite3FindInIndex().
-*/
-#define IN_INDEX_NOOP_OK 0x0001 /* OK to return IN_INDEX_NOOP */
-#define IN_INDEX_MEMBERSHIP 0x0002 /* IN operator used for membership test */
-#define IN_INDEX_LOOP 0x0004 /* IN operator used as a loop */
-SQLITE_PRIVATE int sqlite3FindInIndex(Parse *, Expr *, u32, int*);
-
-SQLITE_PRIVATE int sqlite3JournalOpen(sqlite3_vfs *, const char *, sqlite3_file *, int, int);
-SQLITE_PRIVATE int sqlite3JournalSize(sqlite3_vfs *);
-#ifdef SQLITE_ENABLE_ATOMIC_WRITE
-SQLITE_PRIVATE int sqlite3JournalCreate(sqlite3_file *);
-#endif
-
-SQLITE_PRIVATE int sqlite3JournalIsInMemory(sqlite3_file *p);
-SQLITE_PRIVATE void sqlite3MemJournalOpen(sqlite3_file *);
-
-SQLITE_PRIVATE void sqlite3ExprSetHeightAndFlags(Parse *pParse, Expr *p);
-#if SQLITE_MAX_EXPR_DEPTH>0
-SQLITE_PRIVATE int sqlite3SelectExprHeight(Select *);
-SQLITE_PRIVATE int sqlite3ExprCheckHeight(Parse*, int);
-#else
- #define sqlite3SelectExprHeight(x) 0
- #define sqlite3ExprCheckHeight(x,y)
-#endif
-
-SQLITE_PRIVATE u32 sqlite3Get4byte(const u8*);
-SQLITE_PRIVATE void sqlite3Put4byte(u8*, u32);
-
-#ifdef SQLITE_ENABLE_UNLOCK_NOTIFY
-SQLITE_PRIVATE void sqlite3ConnectionBlocked(sqlite3 *, sqlite3 *);
-SQLITE_PRIVATE void sqlite3ConnectionUnlocked(sqlite3 *db);
-SQLITE_PRIVATE void sqlite3ConnectionClosed(sqlite3 *db);
-#else
- #define sqlite3ConnectionBlocked(x,y)
- #define sqlite3ConnectionUnlocked(x)
- #define sqlite3ConnectionClosed(x)
-#endif
-
-#ifdef SQLITE_DEBUG
-SQLITE_PRIVATE void sqlite3ParserTrace(FILE*, char *);
-#endif
-
-/*
-** If the SQLITE_ENABLE IOTRACE exists then the global variable
-** sqlite3IoTrace is a pointer to a printf-like routine used to
-** print I/O tracing messages.
-*/
-#ifdef SQLITE_ENABLE_IOTRACE
-# define IOTRACE(A) if( sqlite3IoTrace ){ sqlite3IoTrace A; }
-SQLITE_PRIVATE void sqlite3VdbeIOTraceSql(Vdbe*);
-SQLITE_API SQLITE_EXTERN void (SQLITE_CDECL *sqlite3IoTrace)(const char*,...);
-#else
-# define IOTRACE(A)
-# define sqlite3VdbeIOTraceSql(X)
-#endif
-
-/*
-** These routines are available for the mem2.c debugging memory allocator
-** only. They are used to verify that different "types" of memory
-** allocations are properly tracked by the system.
-**
-** sqlite3MemdebugSetType() sets the "type" of an allocation to one of
-** the MEMTYPE_* macros defined below. The type must be a bitmask with
-** a single bit set.
-**
-** sqlite3MemdebugHasType() returns true if any of the bits in its second
-** argument match the type set by the previous sqlite3MemdebugSetType().
-** sqlite3MemdebugHasType() is intended for use inside assert() statements.
-**
-** sqlite3MemdebugNoType() returns true if none of the bits in its second
-** argument match the type set by the previous sqlite3MemdebugSetType().
-**
-** Perhaps the most important point is the difference between MEMTYPE_HEAP
-** and MEMTYPE_LOOKASIDE. If an allocation is MEMTYPE_LOOKASIDE, that means
-** it might have been allocated by lookaside, except the allocation was
-** too large or lookaside was already full. It is important to verify
-** that allocations that might have been satisfied by lookaside are not
-** passed back to non-lookaside free() routines. Asserts such as the
-** example above are placed on the non-lookaside free() routines to verify
-** this constraint.
-**
-** All of this is no-op for a production build. It only comes into
-** play when the SQLITE_MEMDEBUG compile-time option is used.
-*/
-#ifdef SQLITE_MEMDEBUG
-SQLITE_PRIVATE void sqlite3MemdebugSetType(void*,u8);
-SQLITE_PRIVATE int sqlite3MemdebugHasType(void*,u8);
-SQLITE_PRIVATE int sqlite3MemdebugNoType(void*,u8);
-#else
-# define sqlite3MemdebugSetType(X,Y) /* no-op */
-# define sqlite3MemdebugHasType(X,Y) 1
-# define sqlite3MemdebugNoType(X,Y) 1
-#endif
-#define MEMTYPE_HEAP 0x01 /* General heap allocations */
-#define MEMTYPE_LOOKASIDE 0x02 /* Heap that might have been lookaside */
-#define MEMTYPE_SCRATCH 0x04 /* Scratch allocations */
-#define MEMTYPE_PCACHE 0x08 /* Page cache allocations */
-
-/*
-** Threading interface
-*/
-#if SQLITE_MAX_WORKER_THREADS>0
-SQLITE_PRIVATE int sqlite3ThreadCreate(SQLiteThread**,void*(*)(void*),void*);
-SQLITE_PRIVATE int sqlite3ThreadJoin(SQLiteThread*, void**);
-#endif
-
-#if defined(SQLITE_ENABLE_DBSTAT_VTAB) || defined(SQLITE_TEST)
-SQLITE_PRIVATE int sqlite3DbstatRegister(sqlite3*);
-#endif
-
-#endif /* SQLITEINT_H */
-
-/************** End of sqliteInt.h *******************************************/
-/************** Begin file global.c ******************************************/
-/*
-** 2008 June 13
-**
-** The author disclaims copyright to this source code. In place of
-** a legal notice, here is a blessing:
-**
-** May you do good and not evil.
-** May you find forgiveness for yourself and forgive others.
-** May you share freely, never taking more than you give.
-**
-*************************************************************************
-**
-** This file contains definitions of global variables and constants.
-*/
-/* #include "sqliteInt.h" */
-
-/* An array to map all upper-case characters into their corresponding
-** lower-case character.
-**
-** SQLite only considers US-ASCII (or EBCDIC) characters. We do not
-** handle case conversions for the UTF character set since the tables
-** involved are nearly as big or bigger than SQLite itself.
-*/
-SQLITE_PRIVATE const unsigned char sqlite3UpperToLower[] = {
-#ifdef SQLITE_ASCII
- 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
- 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
- 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
- 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 97, 98, 99,100,101,102,103,
- 104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,
- 122, 91, 92, 93, 94, 95, 96, 97, 98, 99,100,101,102,103,104,105,106,107,
- 108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,
- 126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,
- 144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,
- 162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,
- 180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,
- 198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,
- 216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,
- 234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,
- 252,253,254,255
-#endif
-#ifdef SQLITE_EBCDIC
- 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, /* 0x */
- 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, /* 1x */
- 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, /* 2x */
- 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, /* 3x */
- 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, /* 4x */
- 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, /* 5x */
- 96, 97, 98, 99,100,101,102,103,104,105,106,107,108,109,110,111, /* 6x */
- 112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127, /* 7x */
- 128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143, /* 8x */
- 144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159, /* 9x */
- 160,161,162,163,164,165,166,167,168,169,170,171,140,141,142,175, /* Ax */
- 176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191, /* Bx */
- 192,129,130,131,132,133,134,135,136,137,202,203,204,205,206,207, /* Cx */
- 208,145,146,147,148,149,150,151,152,153,218,219,220,221,222,223, /* Dx */
- 224,225,162,163,164,165,166,167,168,169,234,235,236,237,238,239, /* Ex */
- 240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255, /* Fx */
-#endif
-};
-
-/*
-** The following 256 byte lookup table is used to support SQLites built-in
-** equivalents to the following standard library functions:
-**
-** isspace() 0x01
-** isalpha() 0x02
-** isdigit() 0x04
-** isalnum() 0x06
-** isxdigit() 0x08
-** toupper() 0x20
-** SQLite identifier character 0x40
-** Quote character 0x80
-**
-** Bit 0x20 is set if the mapped character requires translation to upper
-** case. i.e. if the character is a lower-case ASCII character.
-** If x is a lower-case ASCII character, then its upper-case equivalent
-** is (x - 0x20). Therefore toupper() can be implemented as:
-**
-** (x & ~(map[x]&0x20))
-**
-** Standard function tolower() is implemented using the sqlite3UpperToLower[]
-** array. tolower() is used more often than toupper() by SQLite.
-**
-** Bit 0x40 is set if the character non-alphanumeric and can be used in an
-** SQLite identifier. Identifiers are alphanumerics, "_", "$", and any
-** non-ASCII UTF character. Hence the test for whether or not a character is
-** part of an identifier is 0x46.
-**
-** SQLite's versions are identical to the standard versions assuming a
-** locale of "C". They are implemented as macros in sqliteInt.h.
-*/
-#ifdef SQLITE_ASCII
-SQLITE_PRIVATE const unsigned char sqlite3CtypeMap[256] = {
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 00..07 ........ */
- 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, /* 08..0f ........ */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 10..17 ........ */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 18..1f ........ */
- 0x01, 0x00, 0x80, 0x00, 0x40, 0x00, 0x00, 0x80, /* 20..27 !"#$%&' */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 28..2f ()*+,-./ */
- 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, /* 30..37 01234567 */
- 0x0c, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 38..3f 89:;<=>? */
-
- 0x00, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x0a, 0x02, /* 40..47 @ABCDEFG */
- 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, /* 48..4f HIJKLMNO */
- 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, /* 50..57 PQRSTUVW */
- 0x02, 0x02, 0x02, 0x80, 0x00, 0x00, 0x00, 0x40, /* 58..5f XYZ[\]^_ */
- 0x80, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x2a, 0x22, /* 60..67 `abcdefg */
- 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, /* 68..6f hijklmno */
- 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, /* 70..77 pqrstuvw */
- 0x22, 0x22, 0x22, 0x00, 0x00, 0x00, 0x00, 0x00, /* 78..7f xyz{|}~. */
-
- 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* 80..87 ........ */
- 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* 88..8f ........ */
- 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* 90..97 ........ */
- 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* 98..9f ........ */
- 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* a0..a7 ........ */
- 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* a8..af ........ */
- 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* b0..b7 ........ */
- 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* b8..bf ........ */
-
- 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* c0..c7 ........ */
- 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* c8..cf ........ */
- 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* d0..d7 ........ */
- 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* d8..df ........ */
- 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* e0..e7 ........ */
- 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* e8..ef ........ */
- 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, /* f0..f7 ........ */
- 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40 /* f8..ff ........ */
-};
-#endif
-
-/* EVIDENCE-OF: R-02982-34736 In order to maintain full backwards
-** compatibility for legacy applications, the URI filename capability is
-** disabled by default.
-**
-** EVIDENCE-OF: R-38799-08373 URI filenames can be enabled or disabled
-** using the SQLITE_USE_URI=1 or SQLITE_USE_URI=0 compile-time options.
-**
-** EVIDENCE-OF: R-43642-56306 By default, URI handling is globally
-** disabled. The default value may be changed by compiling with the
-** SQLITE_USE_URI symbol defined.
-*/
-#ifndef SQLITE_USE_URI
-# define SQLITE_USE_URI 0
-#endif
-
-/* EVIDENCE-OF: R-38720-18127 The default setting is determined by the
-** SQLITE_ALLOW_COVERING_INDEX_SCAN compile-time option, or is "on" if
-** that compile-time option is omitted.
-*/
-#ifndef SQLITE_ALLOW_COVERING_INDEX_SCAN
-# define SQLITE_ALLOW_COVERING_INDEX_SCAN 1
-#endif
-
-/* The minimum PMA size is set to this value multiplied by the database
-** page size in bytes.
-*/
-#ifndef SQLITE_SORTER_PMASZ
-# define SQLITE_SORTER_PMASZ 250
-#endif
-
-/* Statement journals spill to disk when their size exceeds the following
-** threashold (in bytes). 0 means that statement journals are created and
-** written to disk immediately (the default behavior for SQLite versions
-** before 3.12.0). -1 means always keep the entire statement journal in
-** memory. (The statement journal is also always held entirely in memory
-** if journal_mode=MEMORY or if temp_store=MEMORY, regardless of this
-** setting.)
-*/
-#ifndef SQLITE_STMTJRNL_SPILL
-# define SQLITE_STMTJRNL_SPILL (64*1024)
-#endif
-
-/*
-** The following singleton contains the global configuration for
-** the SQLite library.
-*/
-SQLITE_PRIVATE SQLITE_WSD struct Sqlite3Config sqlite3Config = {
- SQLITE_DEFAULT_MEMSTATUS, /* bMemstat */
- 1, /* bCoreMutex */
- SQLITE_THREADSAFE==1, /* bFullMutex */
- SQLITE_USE_URI, /* bOpenUri */
- SQLITE_ALLOW_COVERING_INDEX_SCAN, /* bUseCis */
- 0x7ffffffe, /* mxStrlen */
- 0, /* neverCorrupt */
- 128, /* szLookaside */
- 500, /* nLookaside */
- SQLITE_STMTJRNL_SPILL, /* nStmtSpill */
- {0,0,0,0,0,0,0,0}, /* m */
- {0,0,0,0,0,0,0,0,0}, /* mutex */
- {0,0,0,0,0,0,0,0,0,0,0,0,0},/* pcache2 */
- (void*)0, /* pHeap */
- 0, /* nHeap */
- 0, 0, /* mnHeap, mxHeap */
- SQLITE_DEFAULT_MMAP_SIZE, /* szMmap */
- SQLITE_MAX_MMAP_SIZE, /* mxMmap */
- (void*)0, /* pScratch */
- 0, /* szScratch */
- 0, /* nScratch */
- (void*)0, /* pPage */
- 0, /* szPage */
- SQLITE_DEFAULT_PCACHE_INITSZ, /* nPage */
- 0, /* mxParserStack */
- 0, /* sharedCacheEnabled */
- SQLITE_SORTER_PMASZ, /* szPma */
- /* All the rest should always be initialized to zero */
- 0, /* isInit */
- 0, /* inProgress */
- 0, /* isMutexInit */
- 0, /* isMallocInit */
- 0, /* isPCacheInit */
- 0, /* nRefInitMutex */
- 0, /* pInitMutex */
- 0, /* xLog */
- 0, /* pLogArg */
-#ifdef SQLITE_ENABLE_SQLLOG
- 0, /* xSqllog */
- 0, /* pSqllogArg */
-#endif
-#ifdef SQLITE_VDBE_COVERAGE
- 0, /* xVdbeBranch */
- 0, /* pVbeBranchArg */
-#endif
-#ifndef SQLITE_OMIT_BUILTIN_TEST
- 0, /* xTestCallback */
-#endif
- 0 /* bLocaltimeFault */
-};
-
-/*
-** Hash table for global functions - functions common to all
-** database connections. After initialization, this table is
-** read-only.
-*/
-SQLITE_PRIVATE FuncDefHash sqlite3BuiltinFunctions;
-
-/*
-** Constant tokens for values 0 and 1.
-*/
-SQLITE_PRIVATE const Token sqlite3IntTokens[] = {
- { "0", 1 },
- { "1", 1 }
-};
-
-
-/*
-** The value of the "pending" byte must be 0x40000000 (1 byte past the
-** 1-gibabyte boundary) in a compatible database. SQLite never uses
-** the database page that contains the pending byte. It never attempts
-** to read or write that page. The pending byte page is set assign
-** for use by the VFS layers as space for managing file locks.
-**
-** During testing, it is often desirable to move the pending byte to
-** a different position in the file. This allows code that has to
-** deal with the pending byte to run on files that are much smaller
-** than 1 GiB. The sqlite3_test_control() interface can be used to
-** move the pending byte.
-**
-** IMPORTANT: Changing the pending byte to any value other than
-** 0x40000000 results in an incompatible database file format!
-** Changing the pending byte during operation will result in undefined
-** and incorrect behavior.
-*/
-#ifndef SQLITE_OMIT_WSD
-SQLITE_PRIVATE int sqlite3PendingByte = 0x40000000;
-#endif
-
-/* #include "opcodes.h" */
-/*
-** Properties of opcodes. The OPFLG_INITIALIZER macro is
-** created by mkopcodeh.awk during compilation. Data is obtained
-** from the comments following the "case OP_xxxx:" statements in
-** the vdbe.c file.
-*/
-SQLITE_PRIVATE const unsigned char sqlite3OpcodeProperty[] = OPFLG_INITIALIZER;
-
-/*
-** Name of the default collating sequence
-*/
-SQLITE_PRIVATE const char sqlite3StrBINARY[] = "BINARY";
-
-/************** End of global.c **********************************************/
-/************** Begin file ctime.c *******************************************/
-/*
-** 2010 February 23
-**
-** The author disclaims copyright to this source code. In place of
-** a legal notice, here is a blessing:
-**
-** May you do good and not evil.
-** May you find forgiveness for yourself and forgive others.
-** May you share freely, never taking more than you give.
-**
-*************************************************************************
-**
-** This file implements routines used to report what compile-time options
-** SQLite was built with.
-*/
-
-#ifndef SQLITE_OMIT_COMPILEOPTION_DIAGS
-
-/* #include "sqliteInt.h" */
-
-/*
-** An array of names of all compile-time options. This array should
-** be sorted A-Z.
-**
-** This array looks large, but in a typical installation actually uses
-** only a handful of compile-time options, so most times this array is usually
-** rather short and uses little memory space.
-*/
-static const char * const azCompileOpt[] = {
-
-/* These macros are provided to "stringify" the value of the define
-** for those options in which the value is meaningful. */
-#define CTIMEOPT_VAL_(opt) #opt
-#define CTIMEOPT_VAL(opt) CTIMEOPT_VAL_(opt)
-
-#if SQLITE_32BIT_ROWID
- "32BIT_ROWID",
-#endif
-#if SQLITE_4_BYTE_ALIGNED_MALLOC
- "4_BYTE_ALIGNED_MALLOC",
-#endif
-#if SQLITE_CASE_SENSITIVE_LIKE
- "CASE_SENSITIVE_LIKE",
-#endif
-#if SQLITE_CHECK_PAGES
- "CHECK_PAGES",
-#endif
-#if defined(__clang__) && defined(__clang_major__)
- "COMPILER=clang-" CTIMEOPT_VAL(__clang_major__) "."
- CTIMEOPT_VAL(__clang_minor__) "."
- CTIMEOPT_VAL(__clang_patchlevel__),
-#elif defined(_MSC_VER)
- "COMPILER=msvc-" CTIMEOPT_VAL(_MSC_VER),
-#elif defined(__GNUC__) && defined(__VERSION__)
- "COMPILER=gcc-" __VERSION__,
-#endif
-#if SQLITE_COVERAGE_TEST
- "COVERAGE_TEST",
-#endif
-#if SQLITE_DEBUG
- "DEBUG",
-#endif
-#if SQLITE_DEFAULT_LOCKING_MODE
- "DEFAULT_LOCKING_MODE=" CTIMEOPT_VAL(SQLITE_DEFAULT_LOCKING_MODE),
-#endif
-#if defined(SQLITE_DEFAULT_MMAP_SIZE) && !defined(SQLITE_DEFAULT_MMAP_SIZE_xc)
- "DEFAULT_MMAP_SIZE=" CTIMEOPT_VAL(SQLITE_DEFAULT_MMAP_SIZE),
-#endif
-#if SQLITE_DISABLE_DIRSYNC
- "DISABLE_DIRSYNC",
-#endif
-#if SQLITE_DISABLE_LFS
- "DISABLE_LFS",
-#endif
-#if SQLITE_ENABLE_8_3_NAMES
- "ENABLE_8_3_NAMES=" CTIMEOPT_VAL(SQLITE_ENABLE_8_3_NAMES),
-#endif
-#if SQLITE_ENABLE_API_ARMOR
- "ENABLE_API_ARMOR",
-#endif
-#if SQLITE_ENABLE_ATOMIC_WRITE
- "ENABLE_ATOMIC_WRITE",
-#endif
-#if SQLITE_ENABLE_CEROD
- "ENABLE_CEROD",
-#endif
-#if SQLITE_ENABLE_COLUMN_METADATA
- "ENABLE_COLUMN_METADATA",
-#endif
-#if SQLITE_ENABLE_DBSTAT_VTAB
- "ENABLE_DBSTAT_VTAB",
-#endif
-#if SQLITE_ENABLE_EXPENSIVE_ASSERT
- "ENABLE_EXPENSIVE_ASSERT",
-#endif
-#if SQLITE_ENABLE_FTS1
- "ENABLE_FTS1",
-#endif
-#if SQLITE_ENABLE_FTS2
- "ENABLE_FTS2",
-#endif
-#if SQLITE_ENABLE_FTS3
- "ENABLE_FTS3",
-#endif
-#if SQLITE_ENABLE_FTS3_PARENTHESIS
- "ENABLE_FTS3_PARENTHESIS",
-#endif
-#if SQLITE_ENABLE_FTS4
- "ENABLE_FTS4",
-#endif
-#if SQLITE_ENABLE_FTS5
- "ENABLE_FTS5",
-#endif
-#if SQLITE_ENABLE_ICU
- "ENABLE_ICU",
-#endif
-#if SQLITE_ENABLE_IOTRACE
- "ENABLE_IOTRACE",
-#endif
-#if SQLITE_ENABLE_JSON1
- "ENABLE_JSON1",
-#endif
-#if SQLITE_ENABLE_LOAD_EXTENSION
- "ENABLE_LOAD_EXTENSION",
-#endif
-#if SQLITE_ENABLE_LOCKING_STYLE
- "ENABLE_LOCKING_STYLE=" CTIMEOPT_VAL(SQLITE_ENABLE_LOCKING_STYLE),
-#endif
-#if SQLITE_ENABLE_MEMORY_MANAGEMENT
- "ENABLE_MEMORY_MANAGEMENT",
-#endif
-#if SQLITE_ENABLE_MEMSYS3
- "ENABLE_MEMSYS3",
-#endif
-#if SQLITE_ENABLE_MEMSYS5
- "ENABLE_MEMSYS5",
-#endif
-#if SQLITE_ENABLE_OVERSIZE_CELL_CHECK
- "ENABLE_OVERSIZE_CELL_CHECK",
-#endif
-#if SQLITE_ENABLE_RTREE
- "ENABLE_RTREE",
-#endif
-#if defined(SQLITE_ENABLE_STAT4)
- "ENABLE_STAT4",
-#elif defined(SQLITE_ENABLE_STAT3)
- "ENABLE_STAT3",
-#endif
-#if SQLITE_ENABLE_UNLOCK_NOTIFY
- "ENABLE_UNLOCK_NOTIFY",
-#endif
-#if SQLITE_ENABLE_UPDATE_DELETE_LIMIT
- "ENABLE_UPDATE_DELETE_LIMIT",
-#endif
-#if SQLITE_HAS_CODEC
- "HAS_CODEC",
-#endif
-#if HAVE_ISNAN || SQLITE_HAVE_ISNAN
- "HAVE_ISNAN",
-#endif
-#if SQLITE_HOMEGROWN_RECURSIVE_MUTEX
- "HOMEGROWN_RECURSIVE_MUTEX",
-#endif
-#if SQLITE_IGNORE_AFP_LOCK_ERRORS
- "IGNORE_AFP_LOCK_ERRORS",
-#endif
-#if SQLITE_IGNORE_FLOCK_LOCK_ERRORS
- "IGNORE_FLOCK_LOCK_ERRORS",
-#endif
-#ifdef SQLITE_INT64_TYPE
- "INT64_TYPE",
-#endif
-#ifdef SQLITE_LIKE_DOESNT_MATCH_BLOBS
- "LIKE_DOESNT_MATCH_BLOBS",
-#endif
-#if SQLITE_LOCK_TRACE
- "LOCK_TRACE",
-#endif
-#if defined(SQLITE_MAX_MMAP_SIZE) && !defined(SQLITE_MAX_MMAP_SIZE_xc)
- "MAX_MMAP_SIZE=" CTIMEOPT_VAL(SQLITE_MAX_MMAP_SIZE),
-#endif
-#ifdef SQLITE_MAX_SCHEMA_RETRY
- "MAX_SCHEMA_RETRY=" CTIMEOPT_VAL(SQLITE_MAX_SCHEMA_RETRY),
-#endif
-#if SQLITE_MEMDEBUG
- "MEMDEBUG",
-#endif
-#if SQLITE_MIXED_ENDIAN_64BIT_FLOAT
- "MIXED_ENDIAN_64BIT_FLOAT",
-#endif
-#if SQLITE_NO_SYNC
- "NO_SYNC",
-#endif
-#if SQLITE_OMIT_ALTERTABLE
- "OMIT_ALTERTABLE",
-#endif
-#if SQLITE_OMIT_ANALYZE
- "OMIT_ANALYZE",
-#endif
-#if SQLITE_OMIT_ATTACH
- "OMIT_ATTACH",
-#endif
-#if SQLITE_OMIT_AUTHORIZATION
- "OMIT_AUTHORIZATION",
-#endif
-#if SQLITE_OMIT_AUTOINCREMENT
- "OMIT_AUTOINCREMENT",
-#endif
-#if SQLITE_OMIT_AUTOINIT
- "OMIT_AUTOINIT",
-#endif
-#if SQLITE_OMIT_AUTOMATIC_INDEX
- "OMIT_AUTOMATIC_INDEX",
-#endif
-#if SQLITE_OMIT_AUTORESET
- "OMIT_AUTORESET",
-#endif
-#if SQLITE_OMIT_AUTOVACUUM
- "OMIT_AUTOVACUUM",
-#endif
-#if SQLITE_OMIT_BETWEEN_OPTIMIZATION
- "OMIT_BETWEEN_OPTIMIZATION",
-#endif
-#if SQLITE_OMIT_BLOB_LITERAL
- "OMIT_BLOB_LITERAL",
-#endif
-#if SQLITE_OMIT_BTREECOUNT
- "OMIT_BTREECOUNT",
-#endif
-#if SQLITE_OMIT_BUILTIN_TEST
- "OMIT_BUILTIN_TEST",
-#endif
-#if SQLITE_OMIT_CAST
- "OMIT_CAST",
-#endif
-#if SQLITE_OMIT_CHECK
- "OMIT_CHECK",
-#endif
-#if SQLITE_OMIT_COMPLETE
- "OMIT_COMPLETE",
-#endif
-#if SQLITE_OMIT_COMPOUND_SELECT
- "OMIT_COMPOUND_SELECT",
-#endif
-#if SQLITE_OMIT_CTE
- "OMIT_CTE",
-#endif
-#if SQLITE_OMIT_DATETIME_FUNCS
- "OMIT_DATETIME_FUNCS",
-#endif
-#if SQLITE_OMIT_DECLTYPE
- "OMIT_DECLTYPE",
-#endif
-#if SQLITE_OMIT_DEPRECATED
- "OMIT_DEPRECATED",
-#endif
-#if SQLITE_OMIT_DISKIO
- "OMIT_DISKIO",
-#endif
-#if SQLITE_OMIT_EXPLAIN
- "OMIT_EXPLAIN",
-#endif
-#if SQLITE_OMIT_FLAG_PRAGMAS
- "OMIT_FLAG_PRAGMAS",
-#endif
-#if SQLITE_OMIT_FLOATING_POINT
- "OMIT_FLOATING_POINT",
-#endif
-#if SQLITE_OMIT_FOREIGN_KEY
- "OMIT_FOREIGN_KEY",
-#endif
-#if SQLITE_OMIT_GET_TABLE
- "OMIT_GET_TABLE",
-#endif
-#if SQLITE_OMIT_INCRBLOB
- "OMIT_INCRBLOB",
-#endif
-#if SQLITE_OMIT_INTEGRITY_CHECK
- "OMIT_INTEGRITY_CHECK",
-#endif
-#if SQLITE_OMIT_LIKE_OPTIMIZATION
- "OMIT_LIKE_OPTIMIZATION",
-#endif
-#if SQLITE_OMIT_LOAD_EXTENSION
- "OMIT_LOAD_EXTENSION",
-#endif
-#if SQLITE_OMIT_LOCALTIME
- "OMIT_LOCALTIME",
-#endif
-#if SQLITE_OMIT_LOOKASIDE
- "OMIT_LOOKASIDE",
-#endif
-#if SQLITE_OMIT_MEMORYDB
- "OMIT_MEMORYDB",
-#endif
-#if SQLITE_OMIT_OR_OPTIMIZATION
- "OMIT_OR_OPTIMIZATION",
-#endif
-#if SQLITE_OMIT_PAGER_PRAGMAS
- "OMIT_PAGER_PRAGMAS",
-#endif
-#if SQLITE_OMIT_PRAGMA
- "OMIT_PRAGMA",
-#endif
-#if SQLITE_OMIT_PROGRESS_CALLBACK
- "OMIT_PROGRESS_CALLBACK",
-#endif
-#if SQLITE_OMIT_QUICKBALANCE
- "OMIT_QUICKBALANCE",
-#endif
-#if SQLITE_OMIT_REINDEX
- "OMIT_REINDEX",
-#endif
-#if SQLITE_OMIT_SCHEMA_PRAGMAS
- "OMIT_SCHEMA_PRAGMAS",
-#endif
-#if SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS
- "OMIT_SCHEMA_VERSION_PRAGMAS",
-#endif
-#if SQLITE_OMIT_SHARED_CACHE
- "OMIT_SHARED_CACHE",
-#endif
-#if SQLITE_OMIT_SUBQUERY
- "OMIT_SUBQUERY",
-#endif
-#if SQLITE_OMIT_TCL_VARIABLE
- "OMIT_TCL_VARIABLE",
-#endif
-#if SQLITE_OMIT_TEMPDB
- "OMIT_TEMPDB",
-#endif
-#if SQLITE_OMIT_TRACE
- "OMIT_TRACE",
-#endif
-#if SQLITE_OMIT_TRIGGER
- "OMIT_TRIGGER",
-#endif
-#if SQLITE_OMIT_TRUNCATE_OPTIMIZATION
- "OMIT_TRUNCATE_OPTIMIZATION",
-#endif
-#if SQLITE_OMIT_UTF16
- "OMIT_UTF16",
-#endif
-#if SQLITE_OMIT_VACUUM
- "OMIT_VACUUM",
-#endif
-#if SQLITE_OMIT_VIEW
- "OMIT_VIEW",
-#endif
-#if SQLITE_OMIT_VIRTUALTABLE
- "OMIT_VIRTUALTABLE",
-#endif
-#if SQLITE_OMIT_WAL
- "OMIT_WAL",
-#endif
-#if SQLITE_OMIT_WSD
- "OMIT_WSD",
-#endif
-#if SQLITE_OMIT_XFER_OPT
- "OMIT_XFER_OPT",
-#endif
-#if SQLITE_PERFORMANCE_TRACE
- "PERFORMANCE_TRACE",
-#endif
-#if SQLITE_PROXY_DEBUG
- "PROXY_DEBUG",
-#endif
-#if SQLITE_RTREE_INT_ONLY
- "RTREE_INT_ONLY",
-#endif
-#if SQLITE_SECURE_DELETE
- "SECURE_DELETE",
-#endif
-#if SQLITE_SMALL_STACK
- "SMALL_STACK",
-#endif
-#if SQLITE_SOUNDEX
- "SOUNDEX",
-#endif
-#if SQLITE_SYSTEM_MALLOC
- "SYSTEM_MALLOC",
-#endif
-#if SQLITE_TCL
- "TCL",
-#endif
-#if defined(SQLITE_TEMP_STORE) && !defined(SQLITE_TEMP_STORE_xc)
- "TEMP_STORE=" CTIMEOPT_VAL(SQLITE_TEMP_STORE),
-#endif
-#if SQLITE_TEST
- "TEST",
-#endif
-#if defined(SQLITE_THREADSAFE)
- "THREADSAFE=" CTIMEOPT_VAL(SQLITE_THREADSAFE),
-#endif
-#if SQLITE_USE_ALLOCA
- "USE_ALLOCA",
-#endif
-#if SQLITE_USER_AUTHENTICATION
- "USER_AUTHENTICATION",
-#endif
-#if SQLITE_WIN32_MALLOC
- "WIN32_MALLOC",
-#endif
-#if SQLITE_ZERO_MALLOC
- "ZERO_MALLOC"
-#endif
-};
-
-/*
-** Given the name of a compile-time option, return true if that option
-** was used and false if not.
-**
-** The name can optionally begin with "SQLITE_" but the "SQLITE_" prefix
-** is not required for a match.
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_compileoption_used(const char *zOptName){
- int i, n;
-
-#if SQLITE_ENABLE_API_ARMOR
- if( zOptName==0 ){
- (void)SQLITE_MISUSE_BKPT;
- return 0;
- }
-#endif
- if( sqlite3StrNICmp(zOptName, "SQLITE_", 7)==0 ) zOptName += 7;
- n = sqlite3Strlen30(zOptName);
-
- /* Since ArraySize(azCompileOpt) is normally in single digits, a
- ** linear search is adequate. No need for a binary search. */
- for(i=0; i=0 && NaDb[] (or -1) */
- u8 nullRow; /* True if pointing to a row with no data */
- u8 deferredMoveto; /* A call to sqlite3BtreeMoveto() is needed */
- u8 isTable; /* True for rowid tables. False for indexes */
-#ifdef SQLITE_DEBUG
- u8 seekOp; /* Most recent seek operation on this cursor */
- u8 wrFlag; /* The wrFlag argument to sqlite3BtreeCursor() */
-#endif
- Bool isEphemeral:1; /* True for an ephemeral table */
- Bool useRandomRowid:1;/* Generate new record numbers semi-randomly */
- Bool isOrdered:1; /* True if the table is not BTREE_UNORDERED */
- Pgno pgnoRoot; /* Root page of the open btree cursor */
- i16 nField; /* Number of fields in the header */
- u16 nHdrParsed; /* Number of header fields parsed so far */
- union {
- BtCursor *pCursor; /* CURTYPE_BTREE. Btree cursor */
- sqlite3_vtab_cursor *pVCur; /* CURTYPE_VTAB. Vtab cursor */
- int pseudoTableReg; /* CURTYPE_PSEUDO. Reg holding content. */
- VdbeSorter *pSorter; /* CURTYPE_SORTER. Sorter object */
- } uc;
- Btree *pBt; /* Separate file holding temporary table */
- KeyInfo *pKeyInfo; /* Info about index keys needed by index cursors */
- int seekResult; /* Result of previous sqlite3BtreeMoveto() */
- i64 seqCount; /* Sequence counter */
- i64 movetoTarget; /* Argument to the deferred sqlite3BtreeMoveto() */
- VdbeCursor *pAltCursor; /* Associated index cursor from which to read */
- int *aAltMap; /* Mapping from table to index column numbers */
-#ifdef SQLITE_ENABLE_COLUMN_USED_MASK
- u64 maskUsed; /* Mask of columns used by this cursor */
-#endif
-
- /* Cached information about the header for the data record that the
- ** cursor is currently pointing to. Only valid if cacheStatus matches
- ** Vdbe.cacheCtr. Vdbe.cacheCtr will never take on the value of
- ** CACHE_STALE and so setting cacheStatus=CACHE_STALE guarantees that
- ** the cache is out of date.
- **
- ** aRow might point to (ephemeral) data for the current row, or it might
- ** be NULL.
- */
- u32 cacheStatus; /* Cache is valid if this matches Vdbe.cacheCtr */
- u32 payloadSize; /* Total number of bytes in the record */
- u32 szRow; /* Byte available in aRow */
- u32 iHdrOffset; /* Offset to next unparsed byte of the header */
- const u8 *aRow; /* Data for the current row, if all on one page */
- u32 *aOffset; /* Pointer to aType[nField] */
- u32 aType[1]; /* Type values for all entries in the record */
- /* 2*nField extra array elements allocated for aType[], beyond the one
- ** static element declared in the structure. nField total array slots for
- ** aType[] and nField+1 array slots for aOffset[] */
-};
-
-/*
-** When a sub-program is executed (OP_Program), a structure of this type
-** is allocated to store the current value of the program counter, as
-** well as the current memory cell array and various other frame specific
-** values stored in the Vdbe struct. When the sub-program is finished,
-** these values are copied back to the Vdbe from the VdbeFrame structure,
-** restoring the state of the VM to as it was before the sub-program
-** began executing.
-**
-** The memory for a VdbeFrame object is allocated and managed by a memory
-** cell in the parent (calling) frame. When the memory cell is deleted or
-** overwritten, the VdbeFrame object is not freed immediately. Instead, it
-** is linked into the Vdbe.pDelFrame list. The contents of the Vdbe.pDelFrame
-** list is deleted when the VM is reset in VdbeHalt(). The reason for doing
-** this instead of deleting the VdbeFrame immediately is to avoid recursive
-** calls to sqlite3VdbeMemRelease() when the memory cells belonging to the
-** child frame are released.
-**
-** The currently executing frame is stored in Vdbe.pFrame. Vdbe.pFrame is
-** set to NULL if the currently executing frame is the main program.
-*/
-typedef struct VdbeFrame VdbeFrame;
-struct VdbeFrame {
- Vdbe *v; /* VM this frame belongs to */
- VdbeFrame *pParent; /* Parent of this frame, or NULL if parent is main */
- Op *aOp; /* Program instructions for parent frame */
- i64 *anExec; /* Event counters from parent frame */
- Mem *aMem; /* Array of memory cells for parent frame */
- u8 *aOnceFlag; /* Array of OP_Once flags for parent frame */
- VdbeCursor **apCsr; /* Array of Vdbe cursors for parent frame */
- void *token; /* Copy of SubProgram.token */
- i64 lastRowid; /* Last insert rowid (sqlite3.lastRowid) */
- AuxData *pAuxData; /* Linked list of auxdata allocations */
- int nCursor; /* Number of entries in apCsr */
- int pc; /* Program Counter in parent (calling) frame */
- int nOp; /* Size of aOp array */
- int nMem; /* Number of entries in aMem */
- int nOnceFlag; /* Number of entries in aOnceFlag */
- int nChildMem; /* Number of memory cells for child frame */
- int nChildCsr; /* Number of cursors for child frame */
- int nChange; /* Statement changes (Vdbe.nChange) */
- int nDbChange; /* Value of db->nChange */
-};
-
-#define VdbeFrameMem(p) ((Mem *)&((u8 *)p)[ROUND8(sizeof(VdbeFrame))])
-
-/*
-** A value for VdbeCursor.cacheValid that means the cache is always invalid.
-*/
-#define CACHE_STALE 0
-
-/*
-** Internally, the vdbe manipulates nearly all SQL values as Mem
-** structures. Each Mem struct may cache multiple representations (string,
-** integer etc.) of the same value.
-*/
-struct Mem {
- union MemValue {
- double r; /* Real value used when MEM_Real is set in flags */
- i64 i; /* Integer value used when MEM_Int is set in flags */
- int nZero; /* Used when bit MEM_Zero is set in flags */
- FuncDef *pDef; /* Used only when flags==MEM_Agg */
- RowSet *pRowSet; /* Used only when flags==MEM_RowSet */
- VdbeFrame *pFrame; /* Used when flags==MEM_Frame */
- } u;
- u16 flags; /* Some combination of MEM_Null, MEM_Str, MEM_Dyn, etc. */
- u8 enc; /* SQLITE_UTF8, SQLITE_UTF16BE, SQLITE_UTF16LE */
- u8 eSubtype; /* Subtype for this value */
- int n; /* Number of characters in string value, excluding '\0' */
- char *z; /* String or BLOB value */
- /* ShallowCopy only needs to copy the information above */
- char *zMalloc; /* Space to hold MEM_Str or MEM_Blob if szMalloc>0 */
- int szMalloc; /* Size of the zMalloc allocation */
- u32 uTemp; /* Transient storage for serial_type in OP_MakeRecord */
- sqlite3 *db; /* The associated database connection */
- void (*xDel)(void*);/* Destructor for Mem.z - only valid if MEM_Dyn */
-#ifdef SQLITE_DEBUG
- Mem *pScopyFrom; /* This Mem is a shallow copy of pScopyFrom */
- void *pFiller; /* So that sizeof(Mem) is a multiple of 8 */
-#endif
-};
-
-/*
-** Size of struct Mem not including the Mem.zMalloc member or anything that
-** follows.
-*/
-#define MEMCELLSIZE offsetof(Mem,zMalloc)
-
-/* One or more of the following flags are set to indicate the validOK
-** representations of the value stored in the Mem struct.
-**
-** If the MEM_Null flag is set, then the value is an SQL NULL value.
-** No other flags may be set in this case.
-**
-** If the MEM_Str flag is set then Mem.z points at a string representation.
-** Usually this is encoded in the same unicode encoding as the main
-** database (see below for exceptions). If the MEM_Term flag is also
-** set, then the string is nul terminated. The MEM_Int and MEM_Real
-** flags may coexist with the MEM_Str flag.
-*/
-#define MEM_Null 0x0001 /* Value is NULL */
-#define MEM_Str 0x0002 /* Value is a string */
-#define MEM_Int 0x0004 /* Value is an integer */
-#define MEM_Real 0x0008 /* Value is a real number */
-#define MEM_Blob 0x0010 /* Value is a BLOB */
-#define MEM_AffMask 0x001f /* Mask of affinity bits */
-#define MEM_RowSet 0x0020 /* Value is a RowSet object */
-#define MEM_Frame 0x0040 /* Value is a VdbeFrame object */
-#define MEM_Undefined 0x0080 /* Value is undefined */
-#define MEM_Cleared 0x0100 /* NULL set by OP_Null, not from data */
-#define MEM_TypeMask 0x81ff /* Mask of type bits */
-
-
-/* Whenever Mem contains a valid string or blob representation, one of
-** the following flags must be set to determine the memory management
-** policy for Mem.z. The MEM_Term flag tells us whether or not the
-** string is \000 or \u0000 terminated
-*/
-#define MEM_Term 0x0200 /* String rep is nul terminated */
-#define MEM_Dyn 0x0400 /* Need to call Mem.xDel() on Mem.z */
-#define MEM_Static 0x0800 /* Mem.z points to a static string */
-#define MEM_Ephem 0x1000 /* Mem.z points to an ephemeral string */
-#define MEM_Agg 0x2000 /* Mem.z points to an agg function context */
-#define MEM_Zero 0x4000 /* Mem.i contains count of 0s appended to blob */
-#define MEM_Subtype 0x8000 /* Mem.eSubtype is valid */
-#ifdef SQLITE_OMIT_INCRBLOB
- #undef MEM_Zero
- #define MEM_Zero 0x0000
-#endif
-
-/* Return TRUE if Mem X contains dynamically allocated content - anything
-** that needs to be deallocated to avoid a leak.
-*/
-#define VdbeMemDynamic(X) \
- (((X)->flags&(MEM_Agg|MEM_Dyn|MEM_RowSet|MEM_Frame))!=0)
-
-/*
-** Clear any existing type flags from a Mem and replace them with f
-*/
-#define MemSetTypeFlag(p, f) \
- ((p)->flags = ((p)->flags&~(MEM_TypeMask|MEM_Zero))|f)
-
-/*
-** Return true if a memory cell is not marked as invalid. This macro
-** is for use inside assert() statements only.
-*/
-#ifdef SQLITE_DEBUG
-#define memIsValid(M) ((M)->flags & MEM_Undefined)==0
-#endif
-
-/*
-** Each auxiliary data pointer stored by a user defined function
-** implementation calling sqlite3_set_auxdata() is stored in an instance
-** of this structure. All such structures associated with a single VM
-** are stored in a linked list headed at Vdbe.pAuxData. All are destroyed
-** when the VM is halted (if not before).
-*/
-struct AuxData {
- int iOp; /* Instruction number of OP_Function opcode */
- int iArg; /* Index of function argument. */
- void *pAux; /* Aux data pointer */
- void (*xDelete)(void *); /* Destructor for the aux data */
- AuxData *pNext; /* Next element in list */
-};
-
-/*
-** The "context" argument for an installable function. A pointer to an
-** instance of this structure is the first argument to the routines used
-** implement the SQL functions.
-**
-** There is a typedef for this structure in sqlite.h. So all routines,
-** even the public interface to SQLite, can use a pointer to this structure.
-** But this file is the only place where the internal details of this
-** structure are known.
-**
-** This structure is defined inside of vdbeInt.h because it uses substructures
-** (Mem) which are only defined there.
-*/
-struct sqlite3_context {
- Mem *pOut; /* The return value is stored here */
- FuncDef *pFunc; /* Pointer to function information */
- Mem *pMem; /* Memory cell used to store aggregate context */
- Vdbe *pVdbe; /* The VM that owns this context */
- int iOp; /* Instruction number of OP_Function */
- int isError; /* Error code returned by the function. */
- u8 skipFlag; /* Skip accumulator loading if true */
- u8 fErrorOrAux; /* isError!=0 or pVdbe->pAuxData modified */
- u8 argc; /* Number of arguments */
- sqlite3_value *argv[1]; /* Argument set */
-};
-
-/*
-** An Explain object accumulates indented output which is helpful
-** in describing recursive data structures.
-*/
-struct Explain {
- Vdbe *pVdbe; /* Attach the explanation to this Vdbe */
- StrAccum str; /* The string being accumulated */
- int nIndent; /* Number of elements in aIndent */
- u16 aIndent[100]; /* Levels of indentation */
- char zBase[100]; /* Initial space */
-};
-
-/* A bitfield type for use inside of structures. Always follow with :N where
-** N is the number of bits.
-*/
-typedef unsigned bft; /* Bit Field Type */
-
-typedef struct ScanStatus ScanStatus;
-struct ScanStatus {
- int addrExplain; /* OP_Explain for loop */
- int addrLoop; /* Address of "loops" counter */
- int addrVisit; /* Address of "rows visited" counter */
- int iSelectID; /* The "Select-ID" for this loop */
- LogEst nEst; /* Estimated output rows per loop */
- char *zName; /* Name of table or index */
-};
-
-/*
-** An instance of the virtual machine. This structure contains the complete
-** state of the virtual machine.
-**
-** The "sqlite3_stmt" structure pointer that is returned by sqlite3_prepare()
-** is really a pointer to an instance of this structure.
-*/
-struct Vdbe {
- sqlite3 *db; /* The database connection that owns this statement */
- Op *aOp; /* Space to hold the virtual machine's program */
- Mem *aMem; /* The memory locations */
- Mem **apArg; /* Arguments to currently executing user function */
- Mem *aColName; /* Column names to return */
- Mem *pResultSet; /* Pointer to an array of results */
- Parse *pParse; /* Parsing context used to create this Vdbe */
- int nMem; /* Number of memory locations currently allocated */
- int nOp; /* Number of instructions in the program */
- int nCursor; /* Number of slots in apCsr[] */
- u32 magic; /* Magic number for sanity checking */
- char *zErrMsg; /* Error message written here */
- Vdbe *pPrev,*pNext; /* Linked list of VDBEs with the same Vdbe.db */
- VdbeCursor **apCsr; /* One element of this array for each open cursor */
- Mem *aVar; /* Values for the OP_Variable opcode. */
- char **azVar; /* Name of variables */
- ynVar nVar; /* Number of entries in aVar[] */
- ynVar nzVar; /* Number of entries in azVar[] */
- u32 cacheCtr; /* VdbeCursor row cache generation counter */
- int pc; /* The program counter */
- int rc; /* Value to return */
-#ifdef SQLITE_DEBUG
- int rcApp; /* errcode set by sqlite3_result_error_code() */
-#endif
- u16 nResColumn; /* Number of columns in one row of the result set */
- u8 errorAction; /* Recovery action to do in case of an error */
- bft expired:1; /* True if the VM needs to be recompiled */
- bft doingRerun:1; /* True if rerunning after an auto-reprepare */
- u8 minWriteFileFormat; /* Minimum file format for writable database files */
- bft explain:2; /* True if EXPLAIN present on SQL command */
- bft changeCntOn:1; /* True to update the change-counter */
- bft runOnlyOnce:1; /* Automatically expire on reset */
- bft usesStmtJournal:1; /* True if uses a statement journal */
- bft readOnly:1; /* True for statements that do not write */
- bft bIsReader:1; /* True for statements that read */
- bft isPrepareV2:1; /* True if prepared with prepare_v2() */
- int nChange; /* Number of db changes made since last reset */
- yDbMask btreeMask; /* Bitmask of db->aDb[] entries referenced */
- yDbMask lockMask; /* Subset of btreeMask that requires a lock */
- int iStatement; /* Statement number (or 0 if has not opened stmt) */
- u32 aCounter[5]; /* Counters used by sqlite3_stmt_status() */
-#ifndef SQLITE_OMIT_TRACE
- i64 startTime; /* Time when query started - used for profiling */
-#endif
- i64 iCurrentTime; /* Value of julianday('now') for this statement */
- i64 nFkConstraint; /* Number of imm. FK constraints this VM */
- i64 nStmtDefCons; /* Number of def. constraints when stmt started */
- i64 nStmtDefImmCons; /* Number of def. imm constraints when stmt started */
- char *zSql; /* Text of the SQL statement that generated this */
- void *pFree; /* Free this when deleting the vdbe */
- VdbeFrame *pFrame; /* Parent frame */
- VdbeFrame *pDelFrame; /* List of frame objects to free on VM reset */
- int nFrame; /* Number of frames in pFrame list */
- u32 expmask; /* Binding to these vars invalidates VM */
- SubProgram *pProgram; /* Linked list of all sub-programs used by VM */
- int nOnceFlag; /* Size of array aOnceFlag[] */
- u8 *aOnceFlag; /* Flags for OP_Once */
- AuxData *pAuxData; /* Linked list of auxdata allocations */
-#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
- i64 *anExec; /* Number of times each op has been executed */
- int nScan; /* Entries in aScan[] */
- ScanStatus *aScan; /* Scan definitions for sqlite3_stmt_scanstatus() */
-#endif
-};
-
-/*
-** The following are allowed values for Vdbe.magic
-*/
-#define VDBE_MAGIC_INIT 0x26bceaa5 /* Building a VDBE program */
-#define VDBE_MAGIC_RUN 0xbdf20da3 /* VDBE is ready to execute */
-#define VDBE_MAGIC_HALT 0x519c2973 /* VDBE has completed execution */
-#define VDBE_MAGIC_DEAD 0xb606c3c8 /* The VDBE has been deallocated */
-
-/*
-** Structure used to store the context required by the
-** sqlite3_preupdate_*() API functions.
-*/
-struct PreUpdate {
- Vdbe *v;
- VdbeCursor *pCsr; /* Cursor to read old values from */
- int op; /* One of SQLITE_INSERT, UPDATE, DELETE */
- u8 *aRecord; /* old.* database record */
- KeyInfo keyinfo;
- UnpackedRecord *pUnpacked; /* Unpacked version of aRecord[] */
- UnpackedRecord *pNewUnpacked; /* Unpacked version of new.* record */
- int iNewReg; /* Register for new.* values */
- i64 iKey1; /* First key value passed to hook */
- i64 iKey2; /* Second key value passed to hook */
- int iPKey; /* If not negative index of IPK column */
- Mem *aNew; /* Array of new.* values */
-};
-
-/*
-** Function prototypes
-*/
-SQLITE_PRIVATE void sqlite3VdbeError(Vdbe*, const char *, ...);
-SQLITE_PRIVATE void sqlite3VdbeFreeCursor(Vdbe *, VdbeCursor*);
-void sqliteVdbePopStack(Vdbe*,int);
-SQLITE_PRIVATE int sqlite3VdbeCursorMoveto(VdbeCursor**, int*);
-SQLITE_PRIVATE int sqlite3VdbeCursorRestore(VdbeCursor*);
-#if defined(SQLITE_DEBUG) || defined(VDBE_PROFILE)
-SQLITE_PRIVATE void sqlite3VdbePrintOp(FILE*, int, Op*);
-#endif
-SQLITE_PRIVATE u32 sqlite3VdbeSerialTypeLen(u32);
-SQLITE_PRIVATE u8 sqlite3VdbeOneByteSerialTypeLen(u8);
-SQLITE_PRIVATE u32 sqlite3VdbeSerialType(Mem*, int, u32*);
-SQLITE_PRIVATE u32 sqlite3VdbeSerialPut(unsigned char*, Mem*, u32);
-SQLITE_PRIVATE u32 sqlite3VdbeSerialGet(const unsigned char*, u32, Mem*);
-SQLITE_PRIVATE void sqlite3VdbeDeleteAuxData(sqlite3*, AuxData**, int, int);
-
-int sqlite2BtreeKeyCompare(BtCursor *, const void *, int, int, int *);
-SQLITE_PRIVATE int sqlite3VdbeIdxKeyCompare(sqlite3*,VdbeCursor*,UnpackedRecord*,int*);
-SQLITE_PRIVATE int sqlite3VdbeIdxRowid(sqlite3*, BtCursor*, i64*);
-SQLITE_PRIVATE int sqlite3VdbeExec(Vdbe*);
-SQLITE_PRIVATE int sqlite3VdbeList(Vdbe*);
-SQLITE_PRIVATE int sqlite3VdbeHalt(Vdbe*);
-SQLITE_PRIVATE int sqlite3VdbeChangeEncoding(Mem *, int);
-SQLITE_PRIVATE int sqlite3VdbeMemTooBig(Mem*);
-SQLITE_PRIVATE int sqlite3VdbeMemCopy(Mem*, const Mem*);
-SQLITE_PRIVATE void sqlite3VdbeMemShallowCopy(Mem*, const Mem*, int);
-SQLITE_PRIVATE void sqlite3VdbeMemMove(Mem*, Mem*);
-SQLITE_PRIVATE int sqlite3VdbeMemNulTerminate(Mem*);
-SQLITE_PRIVATE int sqlite3VdbeMemSetStr(Mem*, const char*, int, u8, void(*)(void*));
-SQLITE_PRIVATE void sqlite3VdbeMemSetInt64(Mem*, i64);
-#ifdef SQLITE_OMIT_FLOATING_POINT
-# define sqlite3VdbeMemSetDouble sqlite3VdbeMemSetInt64
-#else
-SQLITE_PRIVATE void sqlite3VdbeMemSetDouble(Mem*, double);
-#endif
-SQLITE_PRIVATE void sqlite3VdbeMemInit(Mem*,sqlite3*,u16);
-SQLITE_PRIVATE void sqlite3VdbeMemSetNull(Mem*);
-SQLITE_PRIVATE void sqlite3VdbeMemSetZeroBlob(Mem*,int);
-SQLITE_PRIVATE void sqlite3VdbeMemSetRowSet(Mem*);
-SQLITE_PRIVATE int sqlite3VdbeMemMakeWriteable(Mem*);
-SQLITE_PRIVATE int sqlite3VdbeMemStringify(Mem*, u8, u8);
-SQLITE_PRIVATE i64 sqlite3VdbeIntValue(Mem*);
-SQLITE_PRIVATE int sqlite3VdbeMemIntegerify(Mem*);
-SQLITE_PRIVATE double sqlite3VdbeRealValue(Mem*);
-SQLITE_PRIVATE void sqlite3VdbeIntegerAffinity(Mem*);
-SQLITE_PRIVATE int sqlite3VdbeMemRealify(Mem*);
-SQLITE_PRIVATE int sqlite3VdbeMemNumerify(Mem*);
-SQLITE_PRIVATE void sqlite3VdbeMemCast(Mem*,u8,u8);
-SQLITE_PRIVATE int sqlite3VdbeMemFromBtree(BtCursor*,u32,u32,int,Mem*);
-SQLITE_PRIVATE void sqlite3VdbeMemRelease(Mem *p);
-SQLITE_PRIVATE int sqlite3VdbeMemFinalize(Mem*, FuncDef*);
-SQLITE_PRIVATE const char *sqlite3OpcodeName(int);
-SQLITE_PRIVATE int sqlite3VdbeMemGrow(Mem *pMem, int n, int preserve);
-SQLITE_PRIVATE int sqlite3VdbeMemClearAndResize(Mem *pMem, int n);
-SQLITE_PRIVATE int sqlite3VdbeCloseStatement(Vdbe *, int);
-SQLITE_PRIVATE void sqlite3VdbeFrameDelete(VdbeFrame*);
-SQLITE_PRIVATE int sqlite3VdbeFrameRestore(VdbeFrame *);
-#ifdef SQLITE_ENABLE_PREUPDATE_HOOK
-SQLITE_PRIVATE void sqlite3VdbePreUpdateHook(Vdbe*,VdbeCursor*,int,const char*,Table*,i64,int);
-#endif
-SQLITE_PRIVATE int sqlite3VdbeTransferError(Vdbe *p);
-
-SQLITE_PRIVATE int sqlite3VdbeSorterInit(sqlite3 *, int, VdbeCursor *);
-SQLITE_PRIVATE void sqlite3VdbeSorterReset(sqlite3 *, VdbeSorter *);
-SQLITE_PRIVATE void sqlite3VdbeSorterClose(sqlite3 *, VdbeCursor *);
-SQLITE_PRIVATE int sqlite3VdbeSorterRowkey(const VdbeCursor *, Mem *);
-SQLITE_PRIVATE int sqlite3VdbeSorterNext(sqlite3 *, const VdbeCursor *, int *);
-SQLITE_PRIVATE int sqlite3VdbeSorterRewind(const VdbeCursor *, int *);
-SQLITE_PRIVATE int sqlite3VdbeSorterWrite(const VdbeCursor *, Mem *);
-SQLITE_PRIVATE int sqlite3VdbeSorterCompare(const VdbeCursor *, Mem *, int, int *);
-
-#if !defined(SQLITE_OMIT_SHARED_CACHE)
-SQLITE_PRIVATE void sqlite3VdbeEnter(Vdbe*);
-#else
-# define sqlite3VdbeEnter(X)
-#endif
-
-#if !defined(SQLITE_OMIT_SHARED_CACHE) && SQLITE_THREADSAFE>0
-SQLITE_PRIVATE void sqlite3VdbeLeave(Vdbe*);
-#else
-# define sqlite3VdbeLeave(X)
-#endif
-
-#ifdef SQLITE_DEBUG
-SQLITE_PRIVATE void sqlite3VdbeMemAboutToChange(Vdbe*,Mem*);
-SQLITE_PRIVATE int sqlite3VdbeCheckMemInvariants(Mem*);
-#endif
-
-#ifndef SQLITE_OMIT_FOREIGN_KEY
-SQLITE_PRIVATE int sqlite3VdbeCheckFk(Vdbe *, int);
-#else
-# define sqlite3VdbeCheckFk(p,i) 0
-#endif
-
-SQLITE_PRIVATE int sqlite3VdbeMemTranslate(Mem*, u8);
-#ifdef SQLITE_DEBUG
-SQLITE_PRIVATE void sqlite3VdbePrintSql(Vdbe*);
-SQLITE_PRIVATE void sqlite3VdbeMemPrettyPrint(Mem *pMem, char *zBuf);
-#endif
-SQLITE_PRIVATE int sqlite3VdbeMemHandleBom(Mem *pMem);
-
-#ifndef SQLITE_OMIT_INCRBLOB
-SQLITE_PRIVATE int sqlite3VdbeMemExpandBlob(Mem *);
- #define ExpandBlob(P) (((P)->flags&MEM_Zero)?sqlite3VdbeMemExpandBlob(P):0)
-#else
- #define sqlite3VdbeMemExpandBlob(x) SQLITE_OK
- #define ExpandBlob(P) SQLITE_OK
-#endif
-
-#endif /* !defined(SQLITE_VDBEINT_H) */
-
-/************** End of vdbeInt.h *********************************************/
-/************** Continuing where we left off in status.c *********************/
-
-/*
-** Variables in which to record status information.
-*/
-#if SQLITE_PTRSIZE>4
-typedef sqlite3_int64 sqlite3StatValueType;
-#else
-typedef u32 sqlite3StatValueType;
-#endif
-typedef struct sqlite3StatType sqlite3StatType;
-static SQLITE_WSD struct sqlite3StatType {
- sqlite3StatValueType nowValue[10]; /* Current value */
- sqlite3StatValueType mxValue[10]; /* Maximum value */
-} sqlite3Stat = { {0,}, {0,} };
-
-/*
-** Elements of sqlite3Stat[] are protected by either the memory allocator
-** mutex, or by the pcache1 mutex. The following array determines which.
-*/
-static const char statMutex[] = {
- 0, /* SQLITE_STATUS_MEMORY_USED */
- 1, /* SQLITE_STATUS_PAGECACHE_USED */
- 1, /* SQLITE_STATUS_PAGECACHE_OVERFLOW */
- 0, /* SQLITE_STATUS_SCRATCH_USED */
- 0, /* SQLITE_STATUS_SCRATCH_OVERFLOW */
- 0, /* SQLITE_STATUS_MALLOC_SIZE */
- 0, /* SQLITE_STATUS_PARSER_STACK */
- 1, /* SQLITE_STATUS_PAGECACHE_SIZE */
- 0, /* SQLITE_STATUS_SCRATCH_SIZE */
- 0, /* SQLITE_STATUS_MALLOC_COUNT */
-};
-
-
-/* The "wsdStat" macro will resolve to the status information
-** state vector. If writable static data is unsupported on the target,
-** we have to locate the state vector at run-time. In the more common
-** case where writable static data is supported, wsdStat can refer directly
-** to the "sqlite3Stat" state vector declared above.
-*/
-#ifdef SQLITE_OMIT_WSD
-# define wsdStatInit sqlite3StatType *x = &GLOBAL(sqlite3StatType,sqlite3Stat)
-# define wsdStat x[0]
-#else
-# define wsdStatInit
-# define wsdStat sqlite3Stat
-#endif
-
-/*
-** Return the current value of a status parameter. The caller must
-** be holding the appropriate mutex.
-*/
-SQLITE_PRIVATE sqlite3_int64 sqlite3StatusValue(int op){
- wsdStatInit;
- assert( op>=0 && op=0 && op=0 && op=0 && opwsdStat.mxValue[op] ){
- wsdStat.mxValue[op] = wsdStat.nowValue[op];
- }
-}
-SQLITE_PRIVATE void sqlite3StatusDown(int op, int N){
- wsdStatInit;
- assert( N>=0 );
- assert( op>=0 && op=0 && op=0 );
- newValue = (sqlite3StatValueType)X;
- assert( op>=0 && op=0 && opwsdStat.mxValue[op] ){
- wsdStat.mxValue[op] = newValue;
- }
-}
-
-/*
-** Query status information.
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_status64(
- int op,
- sqlite3_int64 *pCurrent,
- sqlite3_int64 *pHighwater,
- int resetFlag
-){
- sqlite3_mutex *pMutex;
- wsdStatInit;
- if( op<0 || op>=ArraySize(wsdStat.nowValue) ){
- return SQLITE_MISUSE_BKPT;
- }
-#ifdef SQLITE_ENABLE_API_ARMOR
- if( pCurrent==0 || pHighwater==0 ) return SQLITE_MISUSE_BKPT;
-#endif
- pMutex = statMutex[op] ? sqlite3Pcache1Mutex() : sqlite3MallocMutex();
- sqlite3_mutex_enter(pMutex);
- *pCurrent = wsdStat.nowValue[op];
- *pHighwater = wsdStat.mxValue[op];
- if( resetFlag ){
- wsdStat.mxValue[op] = wsdStat.nowValue[op];
- }
- sqlite3_mutex_leave(pMutex);
- (void)pMutex; /* Prevent warning when SQLITE_THREADSAFE=0 */
- return SQLITE_OK;
-}
-SQLITE_API int SQLITE_STDCALL sqlite3_status(int op, int *pCurrent, int *pHighwater, int resetFlag){
- sqlite3_int64 iCur = 0, iHwtr = 0;
- int rc;
-#ifdef SQLITE_ENABLE_API_ARMOR
- if( pCurrent==0 || pHighwater==0 ) return SQLITE_MISUSE_BKPT;
-#endif
- rc = sqlite3_status64(op, &iCur, &iHwtr, resetFlag);
- if( rc==0 ){
- *pCurrent = (int)iCur;
- *pHighwater = (int)iHwtr;
- }
- return rc;
-}
-
-/*
-** Query status information for a single database connection
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_db_status(
- sqlite3 *db, /* The database connection whose status is desired */
- int op, /* Status verb */
- int *pCurrent, /* Write current value here */
- int *pHighwater, /* Write high-water mark here */
- int resetFlag /* Reset high-water mark if true */
-){
- int rc = SQLITE_OK; /* Return code */
-#ifdef SQLITE_ENABLE_API_ARMOR
- if( !sqlite3SafetyCheckOk(db) || pCurrent==0|| pHighwater==0 ){
- return SQLITE_MISUSE_BKPT;
- }
-#endif
- sqlite3_mutex_enter(db->mutex);
- switch( op ){
- case SQLITE_DBSTATUS_LOOKASIDE_USED: {
- *pCurrent = db->lookaside.nOut;
- *pHighwater = db->lookaside.mxOut;
- if( resetFlag ){
- db->lookaside.mxOut = db->lookaside.nOut;
- }
- break;
- }
-
- case SQLITE_DBSTATUS_LOOKASIDE_HIT:
- case SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE:
- case SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL: {
- testcase( op==SQLITE_DBSTATUS_LOOKASIDE_HIT );
- testcase( op==SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE );
- testcase( op==SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL );
- assert( (op-SQLITE_DBSTATUS_LOOKASIDE_HIT)>=0 );
- assert( (op-SQLITE_DBSTATUS_LOOKASIDE_HIT)<3 );
- *pCurrent = 0;
- *pHighwater = db->lookaside.anStat[op - SQLITE_DBSTATUS_LOOKASIDE_HIT];
- if( resetFlag ){
- db->lookaside.anStat[op - SQLITE_DBSTATUS_LOOKASIDE_HIT] = 0;
- }
- break;
- }
-
- /*
- ** Return an approximation for the amount of memory currently used
- ** by all pagers associated with the given database connection. The
- ** highwater mark is meaningless and is returned as zero.
- */
- case SQLITE_DBSTATUS_CACHE_USED_SHARED:
- case SQLITE_DBSTATUS_CACHE_USED: {
- int totalUsed = 0;
- int i;
- sqlite3BtreeEnterAll(db);
- for(i=0; inDb; i++){
- Btree *pBt = db->aDb[i].pBt;
- if( pBt ){
- Pager *pPager = sqlite3BtreePager(pBt);
- int nByte = sqlite3PagerMemUsed(pPager);
- if( op==SQLITE_DBSTATUS_CACHE_USED_SHARED ){
- nByte = nByte / sqlite3BtreeConnectionCount(pBt);
- }
- totalUsed += nByte;
- }
- }
- sqlite3BtreeLeaveAll(db);
- *pCurrent = totalUsed;
- *pHighwater = 0;
- break;
- }
-
- /*
- ** *pCurrent gets an accurate estimate of the amount of memory used
- ** to store the schema for all databases (main, temp, and any ATTACHed
- ** databases. *pHighwater is set to zero.
- */
- case SQLITE_DBSTATUS_SCHEMA_USED: {
- int i; /* Used to iterate through schemas */
- int nByte = 0; /* Used to accumulate return value */
-
- sqlite3BtreeEnterAll(db);
- db->pnBytesFreed = &nByte;
- for(i=0; inDb; i++){
- Schema *pSchema = db->aDb[i].pSchema;
- if( ALWAYS(pSchema!=0) ){
- HashElem *p;
-
- nByte += sqlite3GlobalConfig.m.xRoundup(sizeof(HashElem)) * (
- pSchema->tblHash.count
- + pSchema->trigHash.count
- + pSchema->idxHash.count
- + pSchema->fkeyHash.count
- );
- nByte += sqlite3_msize(pSchema->tblHash.ht);
- nByte += sqlite3_msize(pSchema->trigHash.ht);
- nByte += sqlite3_msize(pSchema->idxHash.ht);
- nByte += sqlite3_msize(pSchema->fkeyHash.ht);
-
- for(p=sqliteHashFirst(&pSchema->trigHash); p; p=sqliteHashNext(p)){
- sqlite3DeleteTrigger(db, (Trigger*)sqliteHashData(p));
- }
- for(p=sqliteHashFirst(&pSchema->tblHash); p; p=sqliteHashNext(p)){
- sqlite3DeleteTable(db, (Table *)sqliteHashData(p));
- }
- }
- }
- db->pnBytesFreed = 0;
- sqlite3BtreeLeaveAll(db);
-
- *pHighwater = 0;
- *pCurrent = nByte;
- break;
- }
-
- /*
- ** *pCurrent gets an accurate estimate of the amount of memory used
- ** to store all prepared statements.
- ** *pHighwater is set to zero.
- */
- case SQLITE_DBSTATUS_STMT_USED: {
- struct Vdbe *pVdbe; /* Used to iterate through VMs */
- int nByte = 0; /* Used to accumulate return value */
-
- db->pnBytesFreed = &nByte;
- for(pVdbe=db->pVdbe; pVdbe; pVdbe=pVdbe->pNext){
- sqlite3VdbeClearObject(db, pVdbe);
- sqlite3DbFree(db, pVdbe);
- }
- db->pnBytesFreed = 0;
-
- *pHighwater = 0; /* IMP: R-64479-57858 */
- *pCurrent = nByte;
-
- break;
- }
-
- /*
- ** Set *pCurrent to the total cache hits or misses encountered by all
- ** pagers the database handle is connected to. *pHighwater is always set
- ** to zero.
- */
- case SQLITE_DBSTATUS_CACHE_HIT:
- case SQLITE_DBSTATUS_CACHE_MISS:
- case SQLITE_DBSTATUS_CACHE_WRITE:{
- int i;
- int nRet = 0;
- assert( SQLITE_DBSTATUS_CACHE_MISS==SQLITE_DBSTATUS_CACHE_HIT+1 );
- assert( SQLITE_DBSTATUS_CACHE_WRITE==SQLITE_DBSTATUS_CACHE_HIT+2 );
-
- for(i=0; inDb; i++){
- if( db->aDb[i].pBt ){
- Pager *pPager = sqlite3BtreePager(db->aDb[i].pBt);
- sqlite3PagerCacheStat(pPager, op, resetFlag, &nRet);
- }
- }
- *pHighwater = 0; /* IMP: R-42420-56072 */
- /* IMP: R-54100-20147 */
- /* IMP: R-29431-39229 */
- *pCurrent = nRet;
- break;
- }
-
- /* Set *pCurrent to non-zero if there are unresolved deferred foreign
- ** key constraints. Set *pCurrent to zero if all foreign key constraints
- ** have been satisfied. The *pHighwater is always set to zero.
- */
- case SQLITE_DBSTATUS_DEFERRED_FKS: {
- *pHighwater = 0; /* IMP: R-11967-56545 */
- *pCurrent = db->nDeferredImmCons>0 || db->nDeferredCons>0;
- break;
- }
-
- default: {
- rc = SQLITE_ERROR;
- }
- }
- sqlite3_mutex_leave(db->mutex);
- return rc;
-}
-
-/************** End of status.c **********************************************/
-/************** Begin file date.c ********************************************/
-/*
-** 2003 October 31
-**
-** The author disclaims copyright to this source code. In place of
-** a legal notice, here is a blessing:
-**
-** May you do good and not evil.
-** May you find forgiveness for yourself and forgive others.
-** May you share freely, never taking more than you give.
-**
-*************************************************************************
-** This file contains the C functions that implement date and time
-** functions for SQLite.
-**
-** There is only one exported symbol in this file - the function
-** sqlite3RegisterDateTimeFunctions() found at the bottom of the file.
-** All other code has file scope.
-**
-** SQLite processes all times and dates as julian day numbers. The
-** dates and times are stored as the number of days since noon
-** in Greenwich on November 24, 4714 B.C. according to the Gregorian
-** calendar system.
-**
-** 1970-01-01 00:00:00 is JD 2440587.5
-** 2000-01-01 00:00:00 is JD 2451544.5
-**
-** This implementation requires years to be expressed as a 4-digit number
-** which means that only dates between 0000-01-01 and 9999-12-31 can
-** be represented, even though julian day numbers allow a much wider
-** range of dates.
-**
-** The Gregorian calendar system is used for all dates and times,
-** even those that predate the Gregorian calendar. Historians usually
-** use the julian calendar for dates prior to 1582-10-15 and for some
-** dates afterwards, depending on locale. Beware of this difference.
-**
-** The conversion algorithms are implemented based on descriptions
-** in the following text:
-**
-** Jean Meeus
-** Astronomical Algorithms, 2nd Edition, 1998
-** ISBM 0-943396-61-1
-** Willmann-Bell, Inc
-** Richmond, Virginia (USA)
-*/
-/* #include "sqliteInt.h" */
-/* #include */
-/* #include */
-#include
-
-#ifndef SQLITE_OMIT_DATETIME_FUNCS
-
-/*
-** The MSVC CRT on Windows CE may not have a localtime() function.
-** So declare a substitute. The substitute function itself is
-** defined in "os_win.c".
-*/
-#if !defined(SQLITE_OMIT_LOCALTIME) && defined(_WIN32_WCE) && \
- (!defined(SQLITE_MSVC_LOCALTIME_API) || !SQLITE_MSVC_LOCALTIME_API)
-struct tm *__cdecl localtime(const time_t *);
-#endif
-
-/*
-** A structure for holding a single date and time.
-*/
-typedef struct DateTime DateTime;
-struct DateTime {
- sqlite3_int64 iJD; /* The julian day number times 86400000 */
- int Y, M, D; /* Year, month, and day */
- int h, m; /* Hour and minutes */
- int tz; /* Timezone offset in minutes */
- double s; /* Seconds */
- char validYMD; /* True (1) if Y,M,D are valid */
- char validHMS; /* True (1) if h,m,s are valid */
- char validJD; /* True (1) if iJD is valid */
- char validTZ; /* True (1) if tz is valid */
- char tzSet; /* Timezone was set explicitly */
-};
-
-
-/*
-** Convert zDate into one or more integers according to the conversion
-** specifier zFormat.
-**
-** zFormat[] contains 4 characters for each integer converted, except for
-** the last integer which is specified by three characters. The meaning
-** of a four-character format specifiers ABCD is:
-**
-** A: number of digits to convert. Always "2" or "4".
-** B: minimum value. Always "0" or "1".
-** C: maximum value, decoded as:
-** a: 12
-** b: 14
-** c: 24
-** d: 31
-** e: 59
-** f: 9999
-** D: the separator character, or \000 to indicate this is the
-** last number to convert.
-**
-** Example: To translate an ISO-8601 date YYYY-MM-DD, the format would
-** be "40f-21a-20c". The "40f-" indicates the 4-digit year followed by "-".
-** The "21a-" indicates the 2-digit month followed by "-". The "20c" indicates
-** the 2-digit day which is the last integer in the set.
-**
-** The function returns the number of successful conversions.
-*/
-static int getDigits(const char *zDate, const char *zFormat, ...){
- /* The aMx[] array translates the 3rd character of each format
- ** spec into a max size: a b c d e f */
- static const u16 aMx[] = { 12, 14, 24, 31, 59, 9999 };
- va_list ap;
- int cnt = 0;
- char nextC;
- va_start(ap, zFormat);
- do{
- char N = zFormat[0] - '0';
- char min = zFormat[1] - '0';
- int val = 0;
- u16 max;
-
- assert( zFormat[2]>='a' && zFormat[2]<='f' );
- max = aMx[zFormat[2] - 'a'];
- nextC = zFormat[3];
- val = 0;
- while( N-- ){
- if( !sqlite3Isdigit(*zDate) ){
- goto end_getDigits;
- }
- val = val*10 + *zDate - '0';
- zDate++;
- }
- if( val<(int)min || val>(int)max || (nextC!=0 && nextC!=*zDate) ){
- goto end_getDigits;
- }
- *va_arg(ap,int*) = val;
- zDate++;
- cnt++;
- zFormat += 4;
- }while( nextC );
-end_getDigits:
- va_end(ap);
- return cnt;
-}
-
-/*
-** Parse a timezone extension on the end of a date-time.
-** The extension is of the form:
-**
-** (+/-)HH:MM
-**
-** Or the "zulu" notation:
-**
-** Z
-**
-** If the parse is successful, write the number of minutes
-** of change in p->tz and return 0. If a parser error occurs,
-** return non-zero.
-**
-** A missing specifier is not considered an error.
-*/
-static int parseTimezone(const char *zDate, DateTime *p){
- int sgn = 0;
- int nHr, nMn;
- int c;
- while( sqlite3Isspace(*zDate) ){ zDate++; }
- p->tz = 0;
- c = *zDate;
- if( c=='-' ){
- sgn = -1;
- }else if( c=='+' ){
- sgn = +1;
- }else if( c=='Z' || c=='z' ){
- zDate++;
- goto zulu_time;
- }else{
- return c!=0;
- }
- zDate++;
- if( getDigits(zDate, "20b:20e", &nHr, &nMn)!=2 ){
- return 1;
- }
- zDate += 5;
- p->tz = sgn*(nMn + nHr*60);
-zulu_time:
- while( sqlite3Isspace(*zDate) ){ zDate++; }
- p->tzSet = 1;
- return *zDate!=0;
-}
-
-/*
-** Parse times of the form HH:MM or HH:MM:SS or HH:MM:SS.FFFF.
-** The HH, MM, and SS must each be exactly 2 digits. The
-** fractional seconds FFFF can be one or more digits.
-**
-** Return 1 if there is a parsing error and 0 on success.
-*/
-static int parseHhMmSs(const char *zDate, DateTime *p){
- int h, m, s;
- double ms = 0.0;
- if( getDigits(zDate, "20c:20e", &h, &m)!=2 ){
- return 1;
- }
- zDate += 5;
- if( *zDate==':' ){
- zDate++;
- if( getDigits(zDate, "20e", &s)!=1 ){
- return 1;
- }
- zDate += 2;
- if( *zDate=='.' && sqlite3Isdigit(zDate[1]) ){
- double rScale = 1.0;
- zDate++;
- while( sqlite3Isdigit(*zDate) ){
- ms = ms*10.0 + *zDate - '0';
- rScale *= 10.0;
- zDate++;
- }
- ms /= rScale;
- }
- }else{
- s = 0;
- }
- p->validJD = 0;
- p->validHMS = 1;
- p->h = h;
- p->m = m;
- p->s = s + ms;
- if( parseTimezone(zDate, p) ) return 1;
- p->validTZ = (p->tz!=0)?1:0;
- return 0;
-}
-
-/*
-** Convert from YYYY-MM-DD HH:MM:SS to julian day. We always assume
-** that the YYYY-MM-DD is according to the Gregorian calendar.
-**
-** Reference: Meeus page 61
-*/
-static void computeJD(DateTime *p){
- int Y, M, D, A, B, X1, X2;
-
- if( p->validJD ) return;
- if( p->validYMD ){
- Y = p->Y;
- M = p->M;
- D = p->D;
- }else{
- Y = 2000; /* If no YMD specified, assume 2000-Jan-01 */
- M = 1;
- D = 1;
- }
- if( M<=2 ){
- Y--;
- M += 12;
- }
- A = Y/100;
- B = 2 - A + (A/4);
- X1 = 36525*(Y+4716)/100;
- X2 = 306001*(M+1)/10000;
- p->iJD = (sqlite3_int64)((X1 + X2 + D + B - 1524.5 ) * 86400000);
- p->validJD = 1;
- if( p->validHMS ){
- p->iJD += p->h*3600000 + p->m*60000 + (sqlite3_int64)(p->s*1000);
- if( p->validTZ ){
- p->iJD -= p->tz*60000;
- p->validYMD = 0;
- p->validHMS = 0;
- p->validTZ = 0;
- }
- }
-}
-
-/*
-** Parse dates of the form
-**
-** YYYY-MM-DD HH:MM:SS.FFF
-** YYYY-MM-DD HH:MM:SS
-** YYYY-MM-DD HH:MM
-** YYYY-MM-DD
-**
-** Write the result into the DateTime structure and return 0
-** on success and 1 if the input string is not a well-formed
-** date.
-*/
-static int parseYyyyMmDd(const char *zDate, DateTime *p){
- int Y, M, D, neg;
-
- if( zDate[0]=='-' ){
- zDate++;
- neg = 1;
- }else{
- neg = 0;
- }
- if( getDigits(zDate, "40f-21a-21d", &Y, &M, &D)!=3 ){
- return 1;
- }
- zDate += 10;
- while( sqlite3Isspace(*zDate) || 'T'==*(u8*)zDate ){ zDate++; }
- if( parseHhMmSs(zDate, p)==0 ){
- /* We got the time */
- }else if( *zDate==0 ){
- p->validHMS = 0;
- }else{
- return 1;
- }
- p->validJD = 0;
- p->validYMD = 1;
- p->Y = neg ? -Y : Y;
- p->M = M;
- p->D = D;
- if( p->validTZ ){
- computeJD(p);
- }
- return 0;
-}
-
-/*
-** Set the time to the current time reported by the VFS.
-**
-** Return the number of errors.
-*/
-static int setDateTimeToCurrent(sqlite3_context *context, DateTime *p){
- p->iJD = sqlite3StmtCurrentTime(context);
- if( p->iJD>0 ){
- p->validJD = 1;
- return 0;
- }else{
- return 1;
- }
-}
-
-/*
-** Attempt to parse the given string into a julian day number. Return
-** the number of errors.
-**
-** The following are acceptable forms for the input string:
-**
-** YYYY-MM-DD HH:MM:SS.FFF +/-HH:MM
-** DDDD.DD
-** now
-**
-** In the first form, the +/-HH:MM is always optional. The fractional
-** seconds extension (the ".FFF") is optional. The seconds portion
-** (":SS.FFF") is option. The year and date can be omitted as long
-** as there is a time string. The time string can be omitted as long
-** as there is a year and date.
-*/
-static int parseDateOrTime(
- sqlite3_context *context,
- const char *zDate,
- DateTime *p
-){
- double r;
- if( parseYyyyMmDd(zDate,p)==0 ){
- return 0;
- }else if( parseHhMmSs(zDate, p)==0 ){
- return 0;
- }else if( sqlite3StrICmp(zDate,"now")==0){
- return setDateTimeToCurrent(context, p);
- }else if( sqlite3AtoF(zDate, &r, sqlite3Strlen30(zDate), SQLITE_UTF8) ){
- p->iJD = (sqlite3_int64)(r*86400000.0 + 0.5);
- p->validJD = 1;
- return 0;
- }
- return 1;
-}
-
-/*
-** Compute the Year, Month, and Day from the julian day number.
-*/
-static void computeYMD(DateTime *p){
- int Z, A, B, C, D, E, X1;
- if( p->validYMD ) return;
- if( !p->validJD ){
- p->Y = 2000;
- p->M = 1;
- p->D = 1;
- }else{
- Z = (int)((p->iJD + 43200000)/86400000);
- A = (int)((Z - 1867216.25)/36524.25);
- A = Z + 1 + A - (A/4);
- B = A + 1524;
- C = (int)((B - 122.1)/365.25);
- D = (36525*(C&32767))/100;
- E = (int)((B-D)/30.6001);
- X1 = (int)(30.6001*E);
- p->D = B - D - X1;
- p->M = E<14 ? E-1 : E-13;
- p->Y = p->M>2 ? C - 4716 : C - 4715;
- }
- p->validYMD = 1;
-}
-
-/*
-** Compute the Hour, Minute, and Seconds from the julian day number.
-*/
-static void computeHMS(DateTime *p){
- int s;
- if( p->validHMS ) return;
- computeJD(p);
- s = (int)((p->iJD + 43200000) % 86400000);
- p->s = s/1000.0;
- s = (int)p->s;
- p->s -= s;
- p->h = s/3600;
- s -= p->h*3600;
- p->m = s/60;
- p->s += s - p->m*60;
- p->validHMS = 1;
-}
-
-/*
-** Compute both YMD and HMS
-*/
-static void computeYMD_HMS(DateTime *p){
- computeYMD(p);
- computeHMS(p);
-}
-
-/*
-** Clear the YMD and HMS and the TZ
-*/
-static void clearYMD_HMS_TZ(DateTime *p){
- p->validYMD = 0;
- p->validHMS = 0;
- p->validTZ = 0;
-}
-
-#ifndef SQLITE_OMIT_LOCALTIME
-/*
-** On recent Windows platforms, the localtime_s() function is available
-** as part of the "Secure CRT". It is essentially equivalent to
-** localtime_r() available under most POSIX platforms, except that the
-** order of the parameters is reversed.
-**
-** See http://msdn.microsoft.com/en-us/library/a442x3ye(VS.80).aspx.
-**
-** If the user has not indicated to use localtime_r() or localtime_s()
-** already, check for an MSVC build environment that provides
-** localtime_s().
-*/
-#if !HAVE_LOCALTIME_R && !HAVE_LOCALTIME_S \
- && defined(_MSC_VER) && defined(_CRT_INSECURE_DEPRECATE)
-#undef HAVE_LOCALTIME_S
-#define HAVE_LOCALTIME_S 1
-#endif
-
-/*
-** The following routine implements the rough equivalent of localtime_r()
-** using whatever operating-system specific localtime facility that
-** is available. This routine returns 0 on success and
-** non-zero on any kind of error.
-**
-** If the sqlite3GlobalConfig.bLocaltimeFault variable is true then this
-** routine will always fail.
-**
-** EVIDENCE-OF: R-62172-00036 In this implementation, the standard C
-** library function localtime_r() is used to assist in the calculation of
-** local time.
-*/
-static int osLocaltime(time_t *t, struct tm *pTm){
- int rc;
-#if !HAVE_LOCALTIME_R && !HAVE_LOCALTIME_S
- struct tm *pX;
-#if SQLITE_THREADSAFE>0
- sqlite3_mutex *mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER);
-#endif
- sqlite3_mutex_enter(mutex);
- pX = localtime(t);
-#ifndef SQLITE_OMIT_BUILTIN_TEST
- if( sqlite3GlobalConfig.bLocaltimeFault ) pX = 0;
-#endif
- if( pX ) *pTm = *pX;
- sqlite3_mutex_leave(mutex);
- rc = pX==0;
-#else
-#ifndef SQLITE_OMIT_BUILTIN_TEST
- if( sqlite3GlobalConfig.bLocaltimeFault ) return 1;
-#endif
-#if HAVE_LOCALTIME_R
- rc = localtime_r(t, pTm)==0;
-#else
- rc = localtime_s(pTm, t);
-#endif /* HAVE_LOCALTIME_R */
-#endif /* HAVE_LOCALTIME_R || HAVE_LOCALTIME_S */
- return rc;
-}
-#endif /* SQLITE_OMIT_LOCALTIME */
-
-
-#ifndef SQLITE_OMIT_LOCALTIME
-/*
-** Compute the difference (in milliseconds) between localtime and UTC
-** (a.k.a. GMT) for the time value p where p is in UTC. If no error occurs,
-** return this value and set *pRc to SQLITE_OK.
-**
-** Or, if an error does occur, set *pRc to SQLITE_ERROR. The returned value
-** is undefined in this case.
-*/
-static sqlite3_int64 localtimeOffset(
- DateTime *p, /* Date at which to calculate offset */
- sqlite3_context *pCtx, /* Write error here if one occurs */
- int *pRc /* OUT: Error code. SQLITE_OK or ERROR */
-){
- DateTime x, y;
- time_t t;
- struct tm sLocal;
-
- /* Initialize the contents of sLocal to avoid a compiler warning. */
- memset(&sLocal, 0, sizeof(sLocal));
-
- x = *p;
- computeYMD_HMS(&x);
- if( x.Y<1971 || x.Y>=2038 ){
- /* EVIDENCE-OF: R-55269-29598 The localtime_r() C function normally only
- ** works for years between 1970 and 2037. For dates outside this range,
- ** SQLite attempts to map the year into an equivalent year within this
- ** range, do the calculation, then map the year back.
- */
- x.Y = 2000;
- x.M = 1;
- x.D = 1;
- x.h = 0;
- x.m = 0;
- x.s = 0.0;
- } else {
- int s = (int)(x.s + 0.5);
- x.s = s;
- }
- x.tz = 0;
- x.validJD = 0;
- computeJD(&x);
- t = (time_t)(x.iJD/1000 - 21086676*(i64)10000);
- if( osLocaltime(&t, &sLocal) ){
- sqlite3_result_error(pCtx, "local time unavailable", -1);
- *pRc = SQLITE_ERROR;
- return 0;
- }
- y.Y = sLocal.tm_year + 1900;
- y.M = sLocal.tm_mon + 1;
- y.D = sLocal.tm_mday;
- y.h = sLocal.tm_hour;
- y.m = sLocal.tm_min;
- y.s = sLocal.tm_sec;
- y.validYMD = 1;
- y.validHMS = 1;
- y.validJD = 0;
- y.validTZ = 0;
- computeJD(&y);
- *pRc = SQLITE_OK;
- return y.iJD - x.iJD;
-}
-#endif /* SQLITE_OMIT_LOCALTIME */
-
-/*
-** Process a modifier to a date-time stamp. The modifiers are
-** as follows:
-**
-** NNN days
-** NNN hours
-** NNN minutes
-** NNN.NNNN seconds
-** NNN months
-** NNN years
-** start of month
-** start of year
-** start of week
-** start of day
-** weekday N
-** unixepoch
-** localtime
-** utc
-**
-** Return 0 on success and 1 if there is any kind of error. If the error
-** is in a system call (i.e. localtime()), then an error message is written
-** to context pCtx. If the error is an unrecognized modifier, no error is
-** written to pCtx.
-*/
-static int parseModifier(sqlite3_context *pCtx, const char *zMod, DateTime *p){
- int rc = 1;
- int n;
- double r;
- char *z, zBuf[30];
- z = zBuf;
- for(n=0; niJD += localtimeOffset(p, pCtx, &rc);
- clearYMD_HMS_TZ(p);
- }
- break;
- }
-#endif
- case 'u': {
- /*
- ** unixepoch
- **
- ** Treat the current value of p->iJD as the number of
- ** seconds since 1970. Convert to a real julian day number.
- */
- if( strcmp(z, "unixepoch")==0 && p->validJD ){
- p->iJD = (p->iJD + 43200)/86400 + 21086676*(i64)10000000;
- clearYMD_HMS_TZ(p);
- rc = 0;
- }
-#ifndef SQLITE_OMIT_LOCALTIME
- else if( strcmp(z, "utc")==0 ){
- if( p->tzSet==0 ){
- sqlite3_int64 c1;
- computeJD(p);
- c1 = localtimeOffset(p, pCtx, &rc);
- if( rc==SQLITE_OK ){
- p->iJD -= c1;
- clearYMD_HMS_TZ(p);
- p->iJD += c1 - localtimeOffset(p, pCtx, &rc);
- }
- p->tzSet = 1;
- }else{
- rc = SQLITE_OK;
- }
- }
-#endif
- break;
- }
- case 'w': {
- /*
- ** weekday N
- **
- ** Move the date to the same time on the next occurrence of
- ** weekday N where 0==Sunday, 1==Monday, and so forth. If the
- ** date is already on the appropriate weekday, this is a no-op.
- */
- if( strncmp(z, "weekday ", 8)==0
- && sqlite3AtoF(&z[8], &r, sqlite3Strlen30(&z[8]), SQLITE_UTF8)
- && (n=(int)r)==r && n>=0 && r<7 ){
- sqlite3_int64 Z;
- computeYMD_HMS(p);
- p->validTZ = 0;
- p->validJD = 0;
- computeJD(p);
- Z = ((p->iJD + 129600000)/86400000) % 7;
- if( Z>n ) Z -= 7;
- p->iJD += (n - Z)*86400000;
- clearYMD_HMS_TZ(p);
- rc = 0;
- }
- break;
- }
- case 's': {
- /*
- ** start of TTTTT
- **
- ** Move the date backwards to the beginning of the current day,
- ** or month or year.
- */
- if( strncmp(z, "start of ", 9)!=0 ) break;
- z += 9;
- computeYMD(p);
- p->validHMS = 1;
- p->h = p->m = 0;
- p->s = 0.0;
- p->validTZ = 0;
- p->validJD = 0;
- if( strcmp(z,"month")==0 ){
- p->D = 1;
- rc = 0;
- }else if( strcmp(z,"year")==0 ){
- computeYMD(p);
- p->M = 1;
- p->D = 1;
- rc = 0;
- }else if( strcmp(z,"day")==0 ){
- rc = 0;
- }
- break;
- }
- case '+':
- case '-':
- case '0':
- case '1':
- case '2':
- case '3':
- case '4':
- case '5':
- case '6':
- case '7':
- case '8':
- case '9': {
- double rRounder;
- for(n=1; z[n] && z[n]!=':' && !sqlite3Isspace(z[n]); n++){}
- if( !sqlite3AtoF(z, &r, n, SQLITE_UTF8) ){
- rc = 1;
- break;
- }
- if( z[n]==':' ){
- /* A modifier of the form (+|-)HH:MM:SS.FFF adds (or subtracts) the
- ** specified number of hours, minutes, seconds, and fractional seconds
- ** to the time. The ".FFF" may be omitted. The ":SS.FFF" may be
- ** omitted.
- */
- const char *z2 = z;
- DateTime tx;
- sqlite3_int64 day;
- if( !sqlite3Isdigit(*z2) ) z2++;
- memset(&tx, 0, sizeof(tx));
- if( parseHhMmSs(z2, &tx) ) break;
- computeJD(&tx);
- tx.iJD -= 43200000;
- day = tx.iJD/86400000;
- tx.iJD -= day*86400000;
- if( z[0]=='-' ) tx.iJD = -tx.iJD;
- computeJD(p);
- clearYMD_HMS_TZ(p);
- p->iJD += tx.iJD;
- rc = 0;
- break;
- }
- z += n;
- while( sqlite3Isspace(*z) ) z++;
- n = sqlite3Strlen30(z);
- if( n>10 || n<3 ) break;
- if( z[n-1]=='s' ){ z[n-1] = 0; n--; }
- computeJD(p);
- rc = 0;
- rRounder = r<0 ? -0.5 : +0.5;
- if( n==3 && strcmp(z,"day")==0 ){
- p->iJD += (sqlite3_int64)(r*86400000.0 + rRounder);
- }else if( n==4 && strcmp(z,"hour")==0 ){
- p->iJD += (sqlite3_int64)(r*(86400000.0/24.0) + rRounder);
- }else if( n==6 && strcmp(z,"minute")==0 ){
- p->iJD += (sqlite3_int64)(r*(86400000.0/(24.0*60.0)) + rRounder);
- }else if( n==6 && strcmp(z,"second")==0 ){
- p->iJD += (sqlite3_int64)(r*(86400000.0/(24.0*60.0*60.0)) + rRounder);
- }else if( n==5 && strcmp(z,"month")==0 ){
- int x, y;
- computeYMD_HMS(p);
- p->M += (int)r;
- x = p->M>0 ? (p->M-1)/12 : (p->M-12)/12;
- p->Y += x;
- p->M -= x*12;
- p->validJD = 0;
- computeJD(p);
- y = (int)r;
- if( y!=r ){
- p->iJD += (sqlite3_int64)((r - y)*30.0*86400000.0 + rRounder);
- }
- }else if( n==4 && strcmp(z,"year")==0 ){
- int y = (int)r;
- computeYMD_HMS(p);
- p->Y += y;
- p->validJD = 0;
- computeJD(p);
- if( y!=r ){
- p->iJD += (sqlite3_int64)((r - y)*365.0*86400000.0 + rRounder);
- }
- }else{
- rc = 1;
- }
- clearYMD_HMS_TZ(p);
- break;
- }
- default: {
- break;
- }
- }
- return rc;
-}
-
-/*
-** Process time function arguments. argv[0] is a date-time stamp.
-** argv[1] and following are modifiers. Parse them all and write
-** the resulting time into the DateTime structure p. Return 0
-** on success and 1 if there are any errors.
-**
-** If there are zero parameters (if even argv[0] is undefined)
-** then assume a default value of "now" for argv[0].
-*/
-static int isDate(
- sqlite3_context *context,
- int argc,
- sqlite3_value **argv,
- DateTime *p
-){
- int i;
- const unsigned char *z;
- int eType;
- memset(p, 0, sizeof(*p));
- if( argc==0 ){
- return setDateTimeToCurrent(context, p);
- }
- if( (eType = sqlite3_value_type(argv[0]))==SQLITE_FLOAT
- || eType==SQLITE_INTEGER ){
- p->iJD = (sqlite3_int64)(sqlite3_value_double(argv[0])*86400000.0 + 0.5);
- p->validJD = 1;
- }else{
- z = sqlite3_value_text(argv[0]);
- if( !z || parseDateOrTime(context, (char*)z, p) ){
- return 1;
- }
- }
- for(i=1; iaLimit[SQLITE_LIMIT_LENGTH]+1 );
- testcase( n==(u64)db->aLimit[SQLITE_LIMIT_LENGTH] );
- if( n(u64)db->aLimit[SQLITE_LIMIT_LENGTH] ){
- sqlite3_result_error_toobig(context);
- return;
- }else{
- z = sqlite3DbMallocRawNN(db, (int)n);
- if( z==0 ){
- sqlite3_result_error_nomem(context);
- return;
- }
- }
- computeJD(&x);
- computeYMD_HMS(&x);
- for(i=j=0; zFmt[i]; i++){
- if( zFmt[i]!='%' ){
- z[j++] = zFmt[i];
- }else{
- i++;
- switch( zFmt[i] ){
- case 'd': sqlite3_snprintf(3, &z[j],"%02d",x.D); j+=2; break;
- case 'f': {
- double s = x.s;
- if( s>59.999 ) s = 59.999;
- sqlite3_snprintf(7, &z[j],"%06.3f", s);
- j += sqlite3Strlen30(&z[j]);
- break;
- }
- case 'H': sqlite3_snprintf(3, &z[j],"%02d",x.h); j+=2; break;
- case 'W': /* Fall thru */
- case 'j': {
- int nDay; /* Number of days since 1st day of year */
- DateTime y = x;
- y.validJD = 0;
- y.M = 1;
- y.D = 1;
- computeJD(&y);
- nDay = (int)((x.iJD-y.iJD+43200000)/86400000);
- if( zFmt[i]=='W' ){
- int wd; /* 0=Monday, 1=Tuesday, ... 6=Sunday */
- wd = (int)(((x.iJD+43200000)/86400000)%7);
- sqlite3_snprintf(3, &z[j],"%02d",(nDay+7-wd)/7);
- j += 2;
- }else{
- sqlite3_snprintf(4, &z[j],"%03d",nDay+1);
- j += 3;
- }
- break;
- }
- case 'J': {
- sqlite3_snprintf(20, &z[j],"%.16g",x.iJD/86400000.0);
- j+=sqlite3Strlen30(&z[j]);
- break;
- }
- case 'm': sqlite3_snprintf(3, &z[j],"%02d",x.M); j+=2; break;
- case 'M': sqlite3_snprintf(3, &z[j],"%02d",x.m); j+=2; break;
- case 's': {
- sqlite3_snprintf(30,&z[j],"%lld",
- (i64)(x.iJD/1000 - 21086676*(i64)10000));
- j += sqlite3Strlen30(&z[j]);
- break;
- }
- case 'S': sqlite3_snprintf(3,&z[j],"%02d",(int)x.s); j+=2; break;
- case 'w': {
- z[j++] = (char)(((x.iJD+129600000)/86400000) % 7) + '0';
- break;
- }
- case 'Y': {
- sqlite3_snprintf(5,&z[j],"%04d",x.Y); j+=sqlite3Strlen30(&z[j]);
- break;
- }
- default: z[j++] = '%'; break;
- }
- }
- }
- z[j] = 0;
- sqlite3_result_text(context, z, -1,
- z==zBuf ? SQLITE_TRANSIENT : SQLITE_DYNAMIC);
-}
-
-/*
-** current_time()
-**
-** This function returns the same value as time('now').
-*/
-static void ctimeFunc(
- sqlite3_context *context,
- int NotUsed,
- sqlite3_value **NotUsed2
-){
- UNUSED_PARAMETER2(NotUsed, NotUsed2);
- timeFunc(context, 0, 0);
-}
-
-/*
-** current_date()
-**
-** This function returns the same value as date('now').
-*/
-static void cdateFunc(
- sqlite3_context *context,
- int NotUsed,
- sqlite3_value **NotUsed2
-){
- UNUSED_PARAMETER2(NotUsed, NotUsed2);
- dateFunc(context, 0, 0);
-}
-
-/*
-** current_timestamp()
-**
-** This function returns the same value as datetime('now').
-*/
-static void ctimestampFunc(
- sqlite3_context *context,
- int NotUsed,
- sqlite3_value **NotUsed2
-){
- UNUSED_PARAMETER2(NotUsed, NotUsed2);
- datetimeFunc(context, 0, 0);
-}
-#endif /* !defined(SQLITE_OMIT_DATETIME_FUNCS) */
-
-#ifdef SQLITE_OMIT_DATETIME_FUNCS
-/*
-** If the library is compiled to omit the full-scale date and time
-** handling (to get a smaller binary), the following minimal version
-** of the functions current_time(), current_date() and current_timestamp()
-** are included instead. This is to support column declarations that
-** include "DEFAULT CURRENT_TIME" etc.
-**
-** This function uses the C-library functions time(), gmtime()
-** and strftime(). The format string to pass to strftime() is supplied
-** as the user-data for the function.
-*/
-static void currentTimeFunc(
- sqlite3_context *context,
- int argc,
- sqlite3_value **argv
-){
- time_t t;
- char *zFormat = (char *)sqlite3_user_data(context);
- sqlite3_int64 iT;
- struct tm *pTm;
- struct tm sNow;
- char zBuf[20];
-
- UNUSED_PARAMETER(argc);
- UNUSED_PARAMETER(argv);
-
- iT = sqlite3StmtCurrentTime(context);
- if( iT<=0 ) return;
- t = iT/1000 - 10000*(sqlite3_int64)21086676;
-#if HAVE_GMTIME_R
- pTm = gmtime_r(&t, &sNow);
-#else
- sqlite3_mutex_enter(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER));
- pTm = gmtime(&t);
- if( pTm ) memcpy(&sNow, pTm, sizeof(sNow));
- sqlite3_mutex_leave(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER));
-#endif
- if( pTm ){
- strftime(zBuf, 20, zFormat, &sNow);
- sqlite3_result_text(context, zBuf, -1, SQLITE_TRANSIENT);
- }
-}
-#endif
-
-/*
-** This function registered all of the above C functions as SQL
-** functions. This should be the only routine in this file with
-** external linkage.
-*/
-SQLITE_PRIVATE void sqlite3RegisterDateTimeFunctions(void){
- static FuncDef aDateTimeFuncs[] = {
-#ifndef SQLITE_OMIT_DATETIME_FUNCS
- DFUNCTION(julianday, -1, 0, 0, juliandayFunc ),
- DFUNCTION(date, -1, 0, 0, dateFunc ),
- DFUNCTION(time, -1, 0, 0, timeFunc ),
- DFUNCTION(datetime, -1, 0, 0, datetimeFunc ),
- DFUNCTION(strftime, -1, 0, 0, strftimeFunc ),
- DFUNCTION(current_time, 0, 0, 0, ctimeFunc ),
- DFUNCTION(current_timestamp, 0, 0, 0, ctimestampFunc),
- DFUNCTION(current_date, 0, 0, 0, cdateFunc ),
-#else
- STR_FUNCTION(current_time, 0, "%H:%M:%S", 0, currentTimeFunc),
- STR_FUNCTION(current_date, 0, "%Y-%m-%d", 0, currentTimeFunc),
- STR_FUNCTION(current_timestamp, 0, "%Y-%m-%d %H:%M:%S", 0, currentTimeFunc),
-#endif
- };
- sqlite3InsertBuiltinFuncs(aDateTimeFuncs, ArraySize(aDateTimeFuncs));
-}
-
-/************** End of date.c ************************************************/
-/************** Begin file os.c **********************************************/
-/*
-** 2005 November 29
-**
-** The author disclaims copyright to this source code. In place of
-** a legal notice, here is a blessing:
-**
-** May you do good and not evil.
-** May you find forgiveness for yourself and forgive others.
-** May you share freely, never taking more than you give.
-**
-******************************************************************************
-**
-** This file contains OS interface code that is common to all
-** architectures.
-*/
-/* #include "sqliteInt.h" */
-
-/*
-** If we compile with the SQLITE_TEST macro set, then the following block
-** of code will give us the ability to simulate a disk I/O error. This
-** is used for testing the I/O recovery logic.
-*/
-#if defined(SQLITE_TEST)
-SQLITE_API int sqlite3_io_error_hit = 0; /* Total number of I/O Errors */
-SQLITE_API int sqlite3_io_error_hardhit = 0; /* Number of non-benign errors */
-SQLITE_API int sqlite3_io_error_pending = 0; /* Count down to first I/O error */
-SQLITE_API int sqlite3_io_error_persist = 0; /* True if I/O errors persist */
-SQLITE_API int sqlite3_io_error_benign = 0; /* True if errors are benign */
-SQLITE_API int sqlite3_diskfull_pending = 0;
-SQLITE_API int sqlite3_diskfull = 0;
-#endif /* defined(SQLITE_TEST) */
-
-/*
-** When testing, also keep a count of the number of open files.
-*/
-#if defined(SQLITE_TEST)
-SQLITE_API int sqlite3_open_file_count = 0;
-#endif /* defined(SQLITE_TEST) */
-
-/*
-** The default SQLite sqlite3_vfs implementations do not allocate
-** memory (actually, os_unix.c allocates a small amount of memory
-** from within OsOpen()), but some third-party implementations may.
-** So we test the effects of a malloc() failing and the sqlite3OsXXX()
-** function returning SQLITE_IOERR_NOMEM using the DO_OS_MALLOC_TEST macro.
-**
-** The following functions are instrumented for malloc() failure
-** testing:
-**
-** sqlite3OsRead()
-** sqlite3OsWrite()
-** sqlite3OsSync()
-** sqlite3OsFileSize()
-** sqlite3OsLock()
-** sqlite3OsCheckReservedLock()
-** sqlite3OsFileControl()
-** sqlite3OsShmMap()
-** sqlite3OsOpen()
-** sqlite3OsDelete()
-** sqlite3OsAccess()
-** sqlite3OsFullPathname()
-**
-*/
-#if defined(SQLITE_TEST)
-SQLITE_API int sqlite3_memdebug_vfs_oom_test = 1;
- #define DO_OS_MALLOC_TEST(x) \
- if (sqlite3_memdebug_vfs_oom_test && (!x || !sqlite3JournalIsInMemory(x))) { \
- void *pTstAlloc = sqlite3Malloc(10); \
- if (!pTstAlloc) return SQLITE_IOERR_NOMEM_BKPT; \
- sqlite3_free(pTstAlloc); \
- }
-#else
- #define DO_OS_MALLOC_TEST(x)
-#endif
-
-/*
-** The following routines are convenience wrappers around methods
-** of the sqlite3_file object. This is mostly just syntactic sugar. All
-** of this would be completely automatic if SQLite were coded using
-** C++ instead of plain old C.
-*/
-SQLITE_PRIVATE void sqlite3OsClose(sqlite3_file *pId){
- if( pId->pMethods ){
- pId->pMethods->xClose(pId);
- pId->pMethods = 0;
- }
-}
-SQLITE_PRIVATE int sqlite3OsRead(sqlite3_file *id, void *pBuf, int amt, i64 offset){
- DO_OS_MALLOC_TEST(id);
- return id->pMethods->xRead(id, pBuf, amt, offset);
-}
-SQLITE_PRIVATE int sqlite3OsWrite(sqlite3_file *id, const void *pBuf, int amt, i64 offset){
- DO_OS_MALLOC_TEST(id);
- return id->pMethods->xWrite(id, pBuf, amt, offset);
-}
-SQLITE_PRIVATE int sqlite3OsTruncate(sqlite3_file *id, i64 size){
- return id->pMethods->xTruncate(id, size);
-}
-SQLITE_PRIVATE int sqlite3OsSync(sqlite3_file *id, int flags){
- DO_OS_MALLOC_TEST(id);
- return id->pMethods->xSync(id, flags);
-}
-SQLITE_PRIVATE int sqlite3OsFileSize(sqlite3_file *id, i64 *pSize){
- DO_OS_MALLOC_TEST(id);
- return id->pMethods->xFileSize(id, pSize);
-}
-SQLITE_PRIVATE int sqlite3OsLock(sqlite3_file *id, int lockType){
- DO_OS_MALLOC_TEST(id);
- return id->pMethods->xLock(id, lockType);
-}
-SQLITE_PRIVATE int sqlite3OsUnlock(sqlite3_file *id, int lockType){
- return id->pMethods->xUnlock(id, lockType);
-}
-SQLITE_PRIVATE int sqlite3OsCheckReservedLock(sqlite3_file *id, int *pResOut){
- DO_OS_MALLOC_TEST(id);
- return id->pMethods->xCheckReservedLock(id, pResOut);
-}
-
-/*
-** Use sqlite3OsFileControl() when we are doing something that might fail
-** and we need to know about the failures. Use sqlite3OsFileControlHint()
-** when simply tossing information over the wall to the VFS and we do not
-** really care if the VFS receives and understands the information since it
-** is only a hint and can be safely ignored. The sqlite3OsFileControlHint()
-** routine has no return value since the return value would be meaningless.
-*/
-SQLITE_PRIVATE int sqlite3OsFileControl(sqlite3_file *id, int op, void *pArg){
-#ifdef SQLITE_TEST
- if( op!=SQLITE_FCNTL_COMMIT_PHASETWO ){
- /* Faults are not injected into COMMIT_PHASETWO because, assuming SQLite
- ** is using a regular VFS, it is called after the corresponding
- ** transaction has been committed. Injecting a fault at this point
- ** confuses the test scripts - the COMMIT comand returns SQLITE_NOMEM
- ** but the transaction is committed anyway.
- **
- ** The core must call OsFileControl() though, not OsFileControlHint(),
- ** as if a custom VFS (e.g. zipvfs) returns an error here, it probably
- ** means the commit really has failed and an error should be returned
- ** to the user. */
- DO_OS_MALLOC_TEST(id);
- }
-#endif
- return id->pMethods->xFileControl(id, op, pArg);
-}
-SQLITE_PRIVATE void sqlite3OsFileControlHint(sqlite3_file *id, int op, void *pArg){
- (void)id->pMethods->xFileControl(id, op, pArg);
-}
-
-SQLITE_PRIVATE int sqlite3OsSectorSize(sqlite3_file *id){
- int (*xSectorSize)(sqlite3_file*) = id->pMethods->xSectorSize;
- return (xSectorSize ? xSectorSize(id) : SQLITE_DEFAULT_SECTOR_SIZE);
-}
-SQLITE_PRIVATE int sqlite3OsDeviceCharacteristics(sqlite3_file *id){
- return id->pMethods->xDeviceCharacteristics(id);
-}
-SQLITE_PRIVATE int sqlite3OsShmLock(sqlite3_file *id, int offset, int n, int flags){
- return id->pMethods->xShmLock(id, offset, n, flags);
-}
-SQLITE_PRIVATE void sqlite3OsShmBarrier(sqlite3_file *id){
- id->pMethods->xShmBarrier(id);
-}
-SQLITE_PRIVATE int sqlite3OsShmUnmap(sqlite3_file *id, int deleteFlag){
- return id->pMethods->xShmUnmap(id, deleteFlag);
-}
-SQLITE_PRIVATE int sqlite3OsShmMap(
- sqlite3_file *id, /* Database file handle */
- int iPage,
- int pgsz,
- int bExtend, /* True to extend file if necessary */
- void volatile **pp /* OUT: Pointer to mapping */
-){
- DO_OS_MALLOC_TEST(id);
- return id->pMethods->xShmMap(id, iPage, pgsz, bExtend, pp);
-}
-
-#if SQLITE_MAX_MMAP_SIZE>0
-/* The real implementation of xFetch and xUnfetch */
-SQLITE_PRIVATE int sqlite3OsFetch(sqlite3_file *id, i64 iOff, int iAmt, void **pp){
- DO_OS_MALLOC_TEST(id);
- return id->pMethods->xFetch(id, iOff, iAmt, pp);
-}
-SQLITE_PRIVATE int sqlite3OsUnfetch(sqlite3_file *id, i64 iOff, void *p){
- return id->pMethods->xUnfetch(id, iOff, p);
-}
-#else
-/* No-op stubs to use when memory-mapped I/O is disabled */
-SQLITE_PRIVATE int sqlite3OsFetch(sqlite3_file *id, i64 iOff, int iAmt, void **pp){
- *pp = 0;
- return SQLITE_OK;
-}
-SQLITE_PRIVATE int sqlite3OsUnfetch(sqlite3_file *id, i64 iOff, void *p){
- return SQLITE_OK;
-}
-#endif
-
-/*
-** The next group of routines are convenience wrappers around the
-** VFS methods.
-*/
-SQLITE_PRIVATE int sqlite3OsOpen(
- sqlite3_vfs *pVfs,
- const char *zPath,
- sqlite3_file *pFile,
- int flags,
- int *pFlagsOut
-){
- int rc;
- DO_OS_MALLOC_TEST(0);
- /* 0x87f7f is a mask of SQLITE_OPEN_ flags that are valid to be passed
- ** down into the VFS layer. Some SQLITE_OPEN_ flags (for example,
- ** SQLITE_OPEN_FULLMUTEX or SQLITE_OPEN_SHAREDCACHE) are blocked before
- ** reaching the VFS. */
- rc = pVfs->xOpen(pVfs, zPath, pFile, flags & 0x87f7f, pFlagsOut);
- assert( rc==SQLITE_OK || pFile->pMethods==0 );
- return rc;
-}
-SQLITE_PRIVATE int sqlite3OsDelete(sqlite3_vfs *pVfs, const char *zPath, int dirSync){
- DO_OS_MALLOC_TEST(0);
- assert( dirSync==0 || dirSync==1 );
- return pVfs->xDelete(pVfs, zPath, dirSync);
-}
-SQLITE_PRIVATE int sqlite3OsAccess(
- sqlite3_vfs *pVfs,
- const char *zPath,
- int flags,
- int *pResOut
-){
- DO_OS_MALLOC_TEST(0);
- return pVfs->xAccess(pVfs, zPath, flags, pResOut);
-}
-SQLITE_PRIVATE int sqlite3OsFullPathname(
- sqlite3_vfs *pVfs,
- const char *zPath,
- int nPathOut,
- char *zPathOut
-){
- DO_OS_MALLOC_TEST(0);
- zPathOut[0] = 0;
- return pVfs->xFullPathname(pVfs, zPath, nPathOut, zPathOut);
-}
-#ifndef SQLITE_OMIT_LOAD_EXTENSION
-SQLITE_PRIVATE void *sqlite3OsDlOpen(sqlite3_vfs *pVfs, const char *zPath){
- return pVfs->xDlOpen(pVfs, zPath);
-}
-SQLITE_PRIVATE void sqlite3OsDlError(sqlite3_vfs *pVfs, int nByte, char *zBufOut){
- pVfs->xDlError(pVfs, nByte, zBufOut);
-}
-SQLITE_PRIVATE void (*sqlite3OsDlSym(sqlite3_vfs *pVfs, void *pHdle, const char *zSym))(void){
- return pVfs->xDlSym(pVfs, pHdle, zSym);
-}
-SQLITE_PRIVATE void sqlite3OsDlClose(sqlite3_vfs *pVfs, void *pHandle){
- pVfs->xDlClose(pVfs, pHandle);
-}
-#endif /* SQLITE_OMIT_LOAD_EXTENSION */
-SQLITE_PRIVATE int sqlite3OsRandomness(sqlite3_vfs *pVfs, int nByte, char *zBufOut){
- return pVfs->xRandomness(pVfs, nByte, zBufOut);
-}
-SQLITE_PRIVATE int sqlite3OsSleep(sqlite3_vfs *pVfs, int nMicro){
- return pVfs->xSleep(pVfs, nMicro);
-}
-SQLITE_PRIVATE int sqlite3OsGetLastError(sqlite3_vfs *pVfs){
- return pVfs->xGetLastError ? pVfs->xGetLastError(pVfs, 0, 0) : 0;
-}
-SQLITE_PRIVATE int sqlite3OsCurrentTimeInt64(sqlite3_vfs *pVfs, sqlite3_int64 *pTimeOut){
- int rc;
- /* IMPLEMENTATION-OF: R-49045-42493 SQLite will use the xCurrentTimeInt64()
- ** method to get the current date and time if that method is available
- ** (if iVersion is 2 or greater and the function pointer is not NULL) and
- ** will fall back to xCurrentTime() if xCurrentTimeInt64() is
- ** unavailable.
- */
- if( pVfs->iVersion>=2 && pVfs->xCurrentTimeInt64 ){
- rc = pVfs->xCurrentTimeInt64(pVfs, pTimeOut);
- }else{
- double r;
- rc = pVfs->xCurrentTime(pVfs, &r);
- *pTimeOut = (sqlite3_int64)(r*86400000.0);
- }
- return rc;
-}
-
-SQLITE_PRIVATE int sqlite3OsOpenMalloc(
- sqlite3_vfs *pVfs,
- const char *zFile,
- sqlite3_file **ppFile,
- int flags,
- int *pOutFlags
-){
- int rc;
- sqlite3_file *pFile;
- pFile = (sqlite3_file *)sqlite3MallocZero(pVfs->szOsFile);
- if( pFile ){
- rc = sqlite3OsOpen(pVfs, zFile, pFile, flags, pOutFlags);
- if( rc!=SQLITE_OK ){
- sqlite3_free(pFile);
- }else{
- *ppFile = pFile;
- }
- }else{
- rc = SQLITE_NOMEM_BKPT;
- }
- return rc;
-}
-SQLITE_PRIVATE void sqlite3OsCloseFree(sqlite3_file *pFile){
- assert( pFile );
- sqlite3OsClose(pFile);
- sqlite3_free(pFile);
-}
-
-/*
-** This function is a wrapper around the OS specific implementation of
-** sqlite3_os_init(). The purpose of the wrapper is to provide the
-** ability to simulate a malloc failure, so that the handling of an
-** error in sqlite3_os_init() by the upper layers can be tested.
-*/
-SQLITE_PRIVATE int sqlite3OsInit(void){
- void *p = sqlite3_malloc(10);
- if( p==0 ) return SQLITE_NOMEM_BKPT;
- sqlite3_free(p);
- return sqlite3_os_init();
-}
-
-/*
-** The list of all registered VFS implementations.
-*/
-static sqlite3_vfs * SQLITE_WSD vfsList = 0;
-#define vfsList GLOBAL(sqlite3_vfs *, vfsList)
-
-/*
-** Locate a VFS by name. If no name is given, simply return the
-** first VFS on the list.
-*/
-SQLITE_API sqlite3_vfs *SQLITE_STDCALL sqlite3_vfs_find(const char *zVfs){
- sqlite3_vfs *pVfs = 0;
-#if SQLITE_THREADSAFE
- sqlite3_mutex *mutex;
-#endif
-#ifndef SQLITE_OMIT_AUTOINIT
- int rc = sqlite3_initialize();
- if( rc ) return 0;
-#endif
-#if SQLITE_THREADSAFE
- mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER);
-#endif
- sqlite3_mutex_enter(mutex);
- for(pVfs = vfsList; pVfs; pVfs=pVfs->pNext){
- if( zVfs==0 ) break;
- if( strcmp(zVfs, pVfs->zName)==0 ) break;
- }
- sqlite3_mutex_leave(mutex);
- return pVfs;
-}
-
-/*
-** Unlink a VFS from the linked list
-*/
-static void vfsUnlink(sqlite3_vfs *pVfs){
- assert( sqlite3_mutex_held(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER)) );
- if( pVfs==0 ){
- /* No-op */
- }else if( vfsList==pVfs ){
- vfsList = pVfs->pNext;
- }else if( vfsList ){
- sqlite3_vfs *p = vfsList;
- while( p->pNext && p->pNext!=pVfs ){
- p = p->pNext;
- }
- if( p->pNext==pVfs ){
- p->pNext = pVfs->pNext;
- }
- }
-}
-
-/*
-** Register a VFS with the system. It is harmless to register the same
-** VFS multiple times. The new VFS becomes the default if makeDflt is
-** true.
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_vfs_register(sqlite3_vfs *pVfs, int makeDflt){
- MUTEX_LOGIC(sqlite3_mutex *mutex;)
-#ifndef SQLITE_OMIT_AUTOINIT
- int rc = sqlite3_initialize();
- if( rc ) return rc;
-#endif
-#ifdef SQLITE_ENABLE_API_ARMOR
- if( pVfs==0 ) return SQLITE_MISUSE_BKPT;
-#endif
-
- MUTEX_LOGIC( mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER); )
- sqlite3_mutex_enter(mutex);
- vfsUnlink(pVfs);
- if( makeDflt || vfsList==0 ){
- pVfs->pNext = vfsList;
- vfsList = pVfs;
- }else{
- pVfs->pNext = vfsList->pNext;
- vfsList->pNext = pVfs;
- }
- assert(vfsList);
- sqlite3_mutex_leave(mutex);
- return SQLITE_OK;
-}
-
-/*
-** Unregister a VFS so that it is no longer accessible.
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_vfs_unregister(sqlite3_vfs *pVfs){
-#if SQLITE_THREADSAFE
- sqlite3_mutex *mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER);
-#endif
- sqlite3_mutex_enter(mutex);
- vfsUnlink(pVfs);
- sqlite3_mutex_leave(mutex);
- return SQLITE_OK;
-}
-
-/************** End of os.c **************************************************/
-/************** Begin file fault.c *******************************************/
-/*
-** 2008 Jan 22
-**
-** The author disclaims copyright to this source code. In place of
-** a legal notice, here is a blessing:
-**
-** May you do good and not evil.
-** May you find forgiveness for yourself and forgive others.
-** May you share freely, never taking more than you give.
-**
-*************************************************************************
-**
-** This file contains code to support the concept of "benign"
-** malloc failures (when the xMalloc() or xRealloc() method of the
-** sqlite3_mem_methods structure fails to allocate a block of memory
-** and returns 0).
-**
-** Most malloc failures are non-benign. After they occur, SQLite
-** abandons the current operation and returns an error code (usually
-** SQLITE_NOMEM) to the user. However, sometimes a fault is not necessarily
-** fatal. For example, if a malloc fails while resizing a hash table, this
-** is completely recoverable simply by not carrying out the resize. The
-** hash table will continue to function normally. So a malloc failure
-** during a hash table resize is a benign fault.
-*/
-
-/* #include "sqliteInt.h" */
-
-#ifndef SQLITE_OMIT_BUILTIN_TEST
-
-/*
-** Global variables.
-*/
-typedef struct BenignMallocHooks BenignMallocHooks;
-static SQLITE_WSD struct BenignMallocHooks {
- void (*xBenignBegin)(void);
- void (*xBenignEnd)(void);
-} sqlite3Hooks = { 0, 0 };
-
-/* The "wsdHooks" macro will resolve to the appropriate BenignMallocHooks
-** structure. If writable static data is unsupported on the target,
-** we have to locate the state vector at run-time. In the more common
-** case where writable static data is supported, wsdHooks can refer directly
-** to the "sqlite3Hooks" state vector declared above.
-*/
-#ifdef SQLITE_OMIT_WSD
-# define wsdHooksInit \
- BenignMallocHooks *x = &GLOBAL(BenignMallocHooks,sqlite3Hooks)
-# define wsdHooks x[0]
-#else
-# define wsdHooksInit
-# define wsdHooks sqlite3Hooks
-#endif
-
-
-/*
-** Register hooks to call when sqlite3BeginBenignMalloc() and
-** sqlite3EndBenignMalloc() are called, respectively.
-*/
-SQLITE_PRIVATE void sqlite3BenignMallocHooks(
- void (*xBenignBegin)(void),
- void (*xBenignEnd)(void)
-){
- wsdHooksInit;
- wsdHooks.xBenignBegin = xBenignBegin;
- wsdHooks.xBenignEnd = xBenignEnd;
-}
-
-/*
-** This (sqlite3EndBenignMalloc()) is called by SQLite code to indicate that
-** subsequent malloc failures are benign. A call to sqlite3EndBenignMalloc()
-** indicates that subsequent malloc failures are non-benign.
-*/
-SQLITE_PRIVATE void sqlite3BeginBenignMalloc(void){
- wsdHooksInit;
- if( wsdHooks.xBenignBegin ){
- wsdHooks.xBenignBegin();
- }
-}
-SQLITE_PRIVATE void sqlite3EndBenignMalloc(void){
- wsdHooksInit;
- if( wsdHooks.xBenignEnd ){
- wsdHooks.xBenignEnd();
- }
-}
-
-#endif /* #ifndef SQLITE_OMIT_BUILTIN_TEST */
-
-/************** End of fault.c ***********************************************/
-/************** Begin file mem0.c ********************************************/
-/*
-** 2008 October 28
-**
-** The author disclaims copyright to this source code. In place of
-** a legal notice, here is a blessing:
-**
-** May you do good and not evil.
-** May you find forgiveness for yourself and forgive others.
-** May you share freely, never taking more than you give.
-**
-*************************************************************************
-**
-** This file contains a no-op memory allocation drivers for use when
-** SQLITE_ZERO_MALLOC is defined. The allocation drivers implemented
-** here always fail. SQLite will not operate with these drivers. These
-** are merely placeholders. Real drivers must be substituted using
-** sqlite3_config() before SQLite will operate.
-*/
-/* #include "sqliteInt.h" */
-
-/*
-** This version of the memory allocator is the default. It is
-** used when no other memory allocator is specified using compile-time
-** macros.
-*/
-#ifdef SQLITE_ZERO_MALLOC
-
-/*
-** No-op versions of all memory allocation routines
-*/
-static void *sqlite3MemMalloc(int nByte){ return 0; }
-static void sqlite3MemFree(void *pPrior){ return; }
-static void *sqlite3MemRealloc(void *pPrior, int nByte){ return 0; }
-static int sqlite3MemSize(void *pPrior){ return 0; }
-static int sqlite3MemRoundup(int n){ return n; }
-static int sqlite3MemInit(void *NotUsed){ return SQLITE_OK; }
-static void sqlite3MemShutdown(void *NotUsed){ return; }
-
-/*
-** This routine is the only routine in this file with external linkage.
-**
-** Populate the low-level memory allocation function pointers in
-** sqlite3GlobalConfig.m with pointers to the routines in this file.
-*/
-SQLITE_PRIVATE void sqlite3MemSetDefault(void){
- static const sqlite3_mem_methods defaultMethods = {
- sqlite3MemMalloc,
- sqlite3MemFree,
- sqlite3MemRealloc,
- sqlite3MemSize,
- sqlite3MemRoundup,
- sqlite3MemInit,
- sqlite3MemShutdown,
- 0
- };
- sqlite3_config(SQLITE_CONFIG_MALLOC, &defaultMethods);
-}
-
-#endif /* SQLITE_ZERO_MALLOC */
-
-/************** End of mem0.c ************************************************/
-/************** Begin file mem1.c ********************************************/
-/*
-** 2007 August 14
-**
-** The author disclaims copyright to this source code. In place of
-** a legal notice, here is a blessing:
-**
-** May you do good and not evil.
-** May you find forgiveness for yourself and forgive others.
-** May you share freely, never taking more than you give.
-**
-*************************************************************************
-**
-** This file contains low-level memory allocation drivers for when
-** SQLite will use the standard C-library malloc/realloc/free interface
-** to obtain the memory it needs.
-**
-** This file contains implementations of the low-level memory allocation
-** routines specified in the sqlite3_mem_methods object. The content of
-** this file is only used if SQLITE_SYSTEM_MALLOC is defined. The
-** SQLITE_SYSTEM_MALLOC macro is defined automatically if neither the
-** SQLITE_MEMDEBUG nor the SQLITE_WIN32_MALLOC macros are defined. The
-** default configuration is to use memory allocation routines in this
-** file.
-**
-** C-preprocessor macro summary:
-**
-** HAVE_MALLOC_USABLE_SIZE The configure script sets this symbol if
-** the malloc_usable_size() interface exists
-** on the target platform. Or, this symbol
-** can be set manually, if desired.
-** If an equivalent interface exists by
-** a different name, using a separate -D
-** option to rename it.
-**
-** SQLITE_WITHOUT_ZONEMALLOC Some older macs lack support for the zone
-** memory allocator. Set this symbol to enable
-** building on older macs.
-**
-** SQLITE_WITHOUT_MSIZE Set this symbol to disable the use of
-** _msize() on windows systems. This might
-** be necessary when compiling for Delphi,
-** for example.
-*/
-/* #include "sqliteInt.h" */
-
-/*
-** This version of the memory allocator is the default. It is
-** used when no other memory allocator is specified using compile-time
-** macros.
-*/
-#ifdef SQLITE_SYSTEM_MALLOC
-#if defined(__APPLE__) && !defined(SQLITE_WITHOUT_ZONEMALLOC)
-
-/*
-** Use the zone allocator available on apple products unless the
-** SQLITE_WITHOUT_ZONEMALLOC symbol is defined.
-*/
-#include
-#include
-#include
-static malloc_zone_t* _sqliteZone_;
-#define SQLITE_MALLOC(x) malloc_zone_malloc(_sqliteZone_, (x))
-#define SQLITE_FREE(x) malloc_zone_free(_sqliteZone_, (x));
-#define SQLITE_REALLOC(x,y) malloc_zone_realloc(_sqliteZone_, (x), (y))
-#define SQLITE_MALLOCSIZE(x) \
- (_sqliteZone_ ? _sqliteZone_->size(_sqliteZone_,x) : malloc_size(x))
-
-#else /* if not __APPLE__ */
-
-/*
-** Use standard C library malloc and free on non-Apple systems.
-** Also used by Apple systems if SQLITE_WITHOUT_ZONEMALLOC is defined.
-*/
-#define SQLITE_MALLOC(x) malloc(x)
-#define SQLITE_FREE(x) free(x)
-#define SQLITE_REALLOC(x,y) realloc((x),(y))
-
-/*
-** The malloc.h header file is needed for malloc_usable_size() function
-** on some systems (e.g. Linux).
-*/
-#if HAVE_MALLOC_H && HAVE_MALLOC_USABLE_SIZE
-# define SQLITE_USE_MALLOC_H 1
-# define SQLITE_USE_MALLOC_USABLE_SIZE 1
-/*
-** The MSVCRT has malloc_usable_size(), but it is called _msize(). The
-** use of _msize() is automatic, but can be disabled by compiling with
-** -DSQLITE_WITHOUT_MSIZE. Using the _msize() function also requires
-** the malloc.h header file.
-*/
-#elif defined(_MSC_VER) && !defined(SQLITE_WITHOUT_MSIZE)
-# define SQLITE_USE_MALLOC_H
-# define SQLITE_USE_MSIZE
-#endif
-
-/*
-** Include the malloc.h header file, if necessary. Also set define macro
-** SQLITE_MALLOCSIZE to the appropriate function name, which is _msize()
-** for MSVC and malloc_usable_size() for most other systems (e.g. Linux).
-** The memory size function can always be overridden manually by defining
-** the macro SQLITE_MALLOCSIZE to the desired function name.
-*/
-#if defined(SQLITE_USE_MALLOC_H)
-# include
-# if defined(SQLITE_USE_MALLOC_USABLE_SIZE)
-# if !defined(SQLITE_MALLOCSIZE)
-# define SQLITE_MALLOCSIZE(x) malloc_usable_size(x)
-# endif
-# elif defined(SQLITE_USE_MSIZE)
-# if !defined(SQLITE_MALLOCSIZE)
-# define SQLITE_MALLOCSIZE _msize
-# endif
-# endif
-#endif /* defined(SQLITE_USE_MALLOC_H) */
-
-#endif /* __APPLE__ or not __APPLE__ */
-
-/*
-** Like malloc(), but remember the size of the allocation
-** so that we can find it later using sqlite3MemSize().
-**
-** For this low-level routine, we are guaranteed that nByte>0 because
-** cases of nByte<=0 will be intercepted and dealt with by higher level
-** routines.
-*/
-static void *sqlite3MemMalloc(int nByte){
-#ifdef SQLITE_MALLOCSIZE
- void *p = SQLITE_MALLOC( nByte );
- if( p==0 ){
- testcase( sqlite3GlobalConfig.xLog!=0 );
- sqlite3_log(SQLITE_NOMEM, "failed to allocate %u bytes of memory", nByte);
- }
- return p;
-#else
- sqlite3_int64 *p;
- assert( nByte>0 );
- nByte = ROUND8(nByte);
- p = SQLITE_MALLOC( nByte+8 );
- if( p ){
- p[0] = nByte;
- p++;
- }else{
- testcase( sqlite3GlobalConfig.xLog!=0 );
- sqlite3_log(SQLITE_NOMEM, "failed to allocate %u bytes of memory", nByte);
- }
- return (void *)p;
-#endif
-}
-
-/*
-** Like free() but works for allocations obtained from sqlite3MemMalloc()
-** or sqlite3MemRealloc().
-**
-** For this low-level routine, we already know that pPrior!=0 since
-** cases where pPrior==0 will have been intecepted and dealt with
-** by higher-level routines.
-*/
-static void sqlite3MemFree(void *pPrior){
-#ifdef SQLITE_MALLOCSIZE
- SQLITE_FREE(pPrior);
-#else
- sqlite3_int64 *p = (sqlite3_int64*)pPrior;
- assert( pPrior!=0 );
- p--;
- SQLITE_FREE(p);
-#endif
-}
-
-/*
-** Report the allocated size of a prior return from xMalloc()
-** or xRealloc().
-*/
-static int sqlite3MemSize(void *pPrior){
-#ifdef SQLITE_MALLOCSIZE
- assert( pPrior!=0 );
- return (int)SQLITE_MALLOCSIZE(pPrior);
-#else
- sqlite3_int64 *p;
- assert( pPrior!=0 );
- p = (sqlite3_int64*)pPrior;
- p--;
- return (int)p[0];
-#endif
-}
-
-/*
-** Like realloc(). Resize an allocation previously obtained from
-** sqlite3MemMalloc().
-**
-** For this low-level interface, we know that pPrior!=0. Cases where
-** pPrior==0 while have been intercepted by higher-level routine and
-** redirected to xMalloc. Similarly, we know that nByte>0 because
-** cases where nByte<=0 will have been intercepted by higher-level
-** routines and redirected to xFree.
-*/
-static void *sqlite3MemRealloc(void *pPrior, int nByte){
-#ifdef SQLITE_MALLOCSIZE
- void *p = SQLITE_REALLOC(pPrior, nByte);
- if( p==0 ){
- testcase( sqlite3GlobalConfig.xLog!=0 );
- sqlite3_log(SQLITE_NOMEM,
- "failed memory resize %u to %u bytes",
- SQLITE_MALLOCSIZE(pPrior), nByte);
- }
- return p;
-#else
- sqlite3_int64 *p = (sqlite3_int64*)pPrior;
- assert( pPrior!=0 && nByte>0 );
- assert( nByte==ROUND8(nByte) ); /* EV: R-46199-30249 */
- p--;
- p = SQLITE_REALLOC(p, nByte+8 );
- if( p ){
- p[0] = nByte;
- p++;
- }else{
- testcase( sqlite3GlobalConfig.xLog!=0 );
- sqlite3_log(SQLITE_NOMEM,
- "failed memory resize %u to %u bytes",
- sqlite3MemSize(pPrior), nByte);
- }
- return (void*)p;
-#endif
-}
-
-/*
-** Round up a request size to the next valid allocation size.
-*/
-static int sqlite3MemRoundup(int n){
- return ROUND8(n);
-}
-
-/*
-** Initialize this module.
-*/
-static int sqlite3MemInit(void *NotUsed){
-#if defined(__APPLE__) && !defined(SQLITE_WITHOUT_ZONEMALLOC)
- int cpuCount;
- size_t len;
- if( _sqliteZone_ ){
- return SQLITE_OK;
- }
- len = sizeof(cpuCount);
- /* One usually wants to use hw.acctivecpu for MT decisions, but not here */
- sysctlbyname("hw.ncpu", &cpuCount, &len, NULL, 0);
- if( cpuCount>1 ){
- /* defer MT decisions to system malloc */
- _sqliteZone_ = malloc_default_zone();
- }else{
- /* only 1 core, use our own zone to contention over global locks,
- ** e.g. we have our own dedicated locks */
- bool success;
- malloc_zone_t* newzone = malloc_create_zone(4096, 0);
- malloc_set_zone_name(newzone, "Sqlite_Heap");
- do{
- success = OSAtomicCompareAndSwapPtrBarrier(NULL, newzone,
- (void * volatile *)&_sqliteZone_);
- }while(!_sqliteZone_);
- if( !success ){
- /* somebody registered a zone first */
- malloc_destroy_zone(newzone);
- }
- }
-#endif
- UNUSED_PARAMETER(NotUsed);
- return SQLITE_OK;
-}
-
-/*
-** Deinitialize this module.
-*/
-static void sqlite3MemShutdown(void *NotUsed){
- UNUSED_PARAMETER(NotUsed);
- return;
-}
-
-/*
-** This routine is the only routine in this file with external linkage.
-**
-** Populate the low-level memory allocation function pointers in
-** sqlite3GlobalConfig.m with pointers to the routines in this file.
-*/
-SQLITE_PRIVATE void sqlite3MemSetDefault(void){
- static const sqlite3_mem_methods defaultMethods = {
- sqlite3MemMalloc,
- sqlite3MemFree,
- sqlite3MemRealloc,
- sqlite3MemSize,
- sqlite3MemRoundup,
- sqlite3MemInit,
- sqlite3MemShutdown,
- 0
- };
- sqlite3_config(SQLITE_CONFIG_MALLOC, &defaultMethods);
-}
-
-#endif /* SQLITE_SYSTEM_MALLOC */
-
-/************** End of mem1.c ************************************************/
-/************** Begin file mem2.c ********************************************/
-/*
-** 2007 August 15
-**
-** The author disclaims copyright to this source code. In place of
-** a legal notice, here is a blessing:
-**
-** May you do good and not evil.
-** May you find forgiveness for yourself and forgive others.
-** May you share freely, never taking more than you give.
-**
-*************************************************************************
-**
-** This file contains low-level memory allocation drivers for when
-** SQLite will use the standard C-library malloc/realloc/free interface
-** to obtain the memory it needs while adding lots of additional debugging
-** information to each allocation in order to help detect and fix memory
-** leaks and memory usage errors.
-**
-** This file contains implementations of the low-level memory allocation
-** routines specified in the sqlite3_mem_methods object.
-*/
-/* #include "sqliteInt.h" */
-
-/*
-** This version of the memory allocator is used only if the
-** SQLITE_MEMDEBUG macro is defined
-*/
-#ifdef SQLITE_MEMDEBUG
-
-/*
-** The backtrace functionality is only available with GLIBC
-*/
-#ifdef __GLIBC__
- extern int backtrace(void**,int);
- extern void backtrace_symbols_fd(void*const*,int,int);
-#else
-# define backtrace(A,B) 1
-# define backtrace_symbols_fd(A,B,C)
-#endif
-/* #include */
-
-/*
-** Each memory allocation looks like this:
-**
-** ------------------------------------------------------------------------
-** | Title | backtrace pointers | MemBlockHdr | allocation | EndGuard |
-** ------------------------------------------------------------------------
-**
-** The application code sees only a pointer to the allocation. We have
-** to back up from the allocation pointer to find the MemBlockHdr. The
-** MemBlockHdr tells us the size of the allocation and the number of
-** backtrace pointers. There is also a guard word at the end of the
-** MemBlockHdr.
-*/
-struct MemBlockHdr {
- i64 iSize; /* Size of this allocation */
- struct MemBlockHdr *pNext, *pPrev; /* Linked list of all unfreed memory */
- char nBacktrace; /* Number of backtraces on this alloc */
- char nBacktraceSlots; /* Available backtrace slots */
- u8 nTitle; /* Bytes of title; includes '\0' */
- u8 eType; /* Allocation type code */
- int iForeGuard; /* Guard word for sanity */
-};
-
-/*
-** Guard words
-*/
-#define FOREGUARD 0x80F5E153
-#define REARGUARD 0xE4676B53
-
-/*
-** Number of malloc size increments to track.
-*/
-#define NCSIZE 1000
-
-/*
-** All of the static variables used by this module are collected
-** into a single structure named "mem". This is to keep the
-** static variables organized and to reduce namespace pollution
-** when this module is combined with other in the amalgamation.
-*/
-static struct {
-
- /*
- ** Mutex to control access to the memory allocation subsystem.
- */
- sqlite3_mutex *mutex;
-
- /*
- ** Head and tail of a linked list of all outstanding allocations
- */
- struct MemBlockHdr *pFirst;
- struct MemBlockHdr *pLast;
-
- /*
- ** The number of levels of backtrace to save in new allocations.
- */
- int nBacktrace;
- void (*xBacktrace)(int, int, void **);
-
- /*
- ** Title text to insert in front of each block
- */
- int nTitle; /* Bytes of zTitle to save. Includes '\0' and padding */
- char zTitle[100]; /* The title text */
-
- /*
- ** sqlite3MallocDisallow() increments the following counter.
- ** sqlite3MallocAllow() decrements it.
- */
- int disallow; /* Do not allow memory allocation */
-
- /*
- ** Gather statistics on the sizes of memory allocations.
- ** nAlloc[i] is the number of allocation attempts of i*8
- ** bytes. i==NCSIZE is the number of allocation attempts for
- ** sizes more than NCSIZE*8 bytes.
- */
- int nAlloc[NCSIZE]; /* Total number of allocations */
- int nCurrent[NCSIZE]; /* Current number of allocations */
- int mxCurrent[NCSIZE]; /* Highwater mark for nCurrent */
-
-} mem;
-
-
-/*
-** Adjust memory usage statistics
-*/
-static void adjustStats(int iSize, int increment){
- int i = ROUND8(iSize)/8;
- if( i>NCSIZE-1 ){
- i = NCSIZE - 1;
- }
- if( increment>0 ){
- mem.nAlloc[i]++;
- mem.nCurrent[i]++;
- if( mem.nCurrent[i]>mem.mxCurrent[i] ){
- mem.mxCurrent[i] = mem.nCurrent[i];
- }
- }else{
- mem.nCurrent[i]--;
- assert( mem.nCurrent[i]>=0 );
- }
-}
-
-/*
-** Given an allocation, find the MemBlockHdr for that allocation.
-**
-** This routine checks the guards at either end of the allocation and
-** if they are incorrect it asserts.
-*/
-static struct MemBlockHdr *sqlite3MemsysGetHeader(void *pAllocation){
- struct MemBlockHdr *p;
- int *pInt;
- u8 *pU8;
- int nReserve;
-
- p = (struct MemBlockHdr*)pAllocation;
- p--;
- assert( p->iForeGuard==(int)FOREGUARD );
- nReserve = ROUND8(p->iSize);
- pInt = (int*)pAllocation;
- pU8 = (u8*)pAllocation;
- assert( pInt[nReserve/sizeof(int)]==(int)REARGUARD );
- /* This checks any of the "extra" bytes allocated due
- ** to rounding up to an 8 byte boundary to ensure
- ** they haven't been overwritten.
- */
- while( nReserve-- > p->iSize ) assert( pU8[nReserve]==0x65 );
- return p;
-}
-
-/*
-** Return the number of bytes currently allocated at address p.
-*/
-static int sqlite3MemSize(void *p){
- struct MemBlockHdr *pHdr;
- if( !p ){
- return 0;
- }
- pHdr = sqlite3MemsysGetHeader(p);
- return (int)pHdr->iSize;
-}
-
-/*
-** Initialize the memory allocation subsystem.
-*/
-static int sqlite3MemInit(void *NotUsed){
- UNUSED_PARAMETER(NotUsed);
- assert( (sizeof(struct MemBlockHdr)&7) == 0 );
- if( !sqlite3GlobalConfig.bMemstat ){
- /* If memory status is enabled, then the malloc.c wrapper will already
- ** hold the STATIC_MEM mutex when the routines here are invoked. */
- mem.mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MEM);
- }
- return SQLITE_OK;
-}
-
-/*
-** Deinitialize the memory allocation subsystem.
-*/
-static void sqlite3MemShutdown(void *NotUsed){
- UNUSED_PARAMETER(NotUsed);
- mem.mutex = 0;
-}
-
-/*
-** Round up a request size to the next valid allocation size.
-*/
-static int sqlite3MemRoundup(int n){
- return ROUND8(n);
-}
-
-/*
-** Fill a buffer with pseudo-random bytes. This is used to preset
-** the content of a new memory allocation to unpredictable values and
-** to clear the content of a freed allocation to unpredictable values.
-*/
-static void randomFill(char *pBuf, int nByte){
- unsigned int x, y, r;
- x = SQLITE_PTR_TO_INT(pBuf);
- y = nByte | 1;
- while( nByte >= 4 ){
- x = (x>>1) ^ (-(int)(x&1) & 0xd0000001);
- y = y*1103515245 + 12345;
- r = x ^ y;
- *(int*)pBuf = r;
- pBuf += 4;
- nByte -= 4;
- }
- while( nByte-- > 0 ){
- x = (x>>1) ^ (-(int)(x&1) & 0xd0000001);
- y = y*1103515245 + 12345;
- r = x ^ y;
- *(pBuf++) = r & 0xff;
- }
-}
-
-/*
-** Allocate nByte bytes of memory.
-*/
-static void *sqlite3MemMalloc(int nByte){
- struct MemBlockHdr *pHdr;
- void **pBt;
- char *z;
- int *pInt;
- void *p = 0;
- int totalSize;
- int nReserve;
- sqlite3_mutex_enter(mem.mutex);
- assert( mem.disallow==0 );
- nReserve = ROUND8(nByte);
- totalSize = nReserve + sizeof(*pHdr) + sizeof(int) +
- mem.nBacktrace*sizeof(void*) + mem.nTitle;
- p = malloc(totalSize);
- if( p ){
- z = p;
- pBt = (void**)&z[mem.nTitle];
- pHdr = (struct MemBlockHdr*)&pBt[mem.nBacktrace];
- pHdr->pNext = 0;
- pHdr->pPrev = mem.pLast;
- if( mem.pLast ){
- mem.pLast->pNext = pHdr;
- }else{
- mem.pFirst = pHdr;
- }
- mem.pLast = pHdr;
- pHdr->iForeGuard = FOREGUARD;
- pHdr->eType = MEMTYPE_HEAP;
- pHdr->nBacktraceSlots = mem.nBacktrace;
- pHdr->nTitle = mem.nTitle;
- if( mem.nBacktrace ){
- void *aAddr[40];
- pHdr->nBacktrace = backtrace(aAddr, mem.nBacktrace+1)-1;
- memcpy(pBt, &aAddr[1], pHdr->nBacktrace*sizeof(void*));
- assert(pBt[0]);
- if( mem.xBacktrace ){
- mem.xBacktrace(nByte, pHdr->nBacktrace-1, &aAddr[1]);
- }
- }else{
- pHdr->nBacktrace = 0;
- }
- if( mem.nTitle ){
- memcpy(z, mem.zTitle, mem.nTitle);
- }
- pHdr->iSize = nByte;
- adjustStats(nByte, +1);
- pInt = (int*)&pHdr[1];
- pInt[nReserve/sizeof(int)] = REARGUARD;
- randomFill((char*)pInt, nByte);
- memset(((char*)pInt)+nByte, 0x65, nReserve-nByte);
- p = (void*)pInt;
- }
- sqlite3_mutex_leave(mem.mutex);
- return p;
-}
-
-/*
-** Free memory.
-*/
-static void sqlite3MemFree(void *pPrior){
- struct MemBlockHdr *pHdr;
- void **pBt;
- char *z;
- assert( sqlite3GlobalConfig.bMemstat || sqlite3GlobalConfig.bCoreMutex==0
- || mem.mutex!=0 );
- pHdr = sqlite3MemsysGetHeader(pPrior);
- pBt = (void**)pHdr;
- pBt -= pHdr->nBacktraceSlots;
- sqlite3_mutex_enter(mem.mutex);
- if( pHdr->pPrev ){
- assert( pHdr->pPrev->pNext==pHdr );
- pHdr->pPrev->pNext = pHdr->pNext;
- }else{
- assert( mem.pFirst==pHdr );
- mem.pFirst = pHdr->pNext;
- }
- if( pHdr->pNext ){
- assert( pHdr->pNext->pPrev==pHdr );
- pHdr->pNext->pPrev = pHdr->pPrev;
- }else{
- assert( mem.pLast==pHdr );
- mem.pLast = pHdr->pPrev;
- }
- z = (char*)pBt;
- z -= pHdr->nTitle;
- adjustStats((int)pHdr->iSize, -1);
- randomFill(z, sizeof(void*)*pHdr->nBacktraceSlots + sizeof(*pHdr) +
- (int)pHdr->iSize + sizeof(int) + pHdr->nTitle);
- free(z);
- sqlite3_mutex_leave(mem.mutex);
-}
-
-/*
-** Change the size of an existing memory allocation.
-**
-** For this debugging implementation, we *always* make a copy of the
-** allocation into a new place in memory. In this way, if the
-** higher level code is using pointer to the old allocation, it is
-** much more likely to break and we are much more liking to find
-** the error.
-*/
-static void *sqlite3MemRealloc(void *pPrior, int nByte){
- struct MemBlockHdr *pOldHdr;
- void *pNew;
- assert( mem.disallow==0 );
- assert( (nByte & 7)==0 ); /* EV: R-46199-30249 */
- pOldHdr = sqlite3MemsysGetHeader(pPrior);
- pNew = sqlite3MemMalloc(nByte);
- if( pNew ){
- memcpy(pNew, pPrior, (int)(nByteiSize ? nByte : pOldHdr->iSize));
- if( nByte>pOldHdr->iSize ){
- randomFill(&((char*)pNew)[pOldHdr->iSize], nByte - (int)pOldHdr->iSize);
- }
- sqlite3MemFree(pPrior);
- }
- return pNew;
-}
-
-/*
-** Populate the low-level memory allocation function pointers in
-** sqlite3GlobalConfig.m with pointers to the routines in this file.
-*/
-SQLITE_PRIVATE void sqlite3MemSetDefault(void){
- static const sqlite3_mem_methods defaultMethods = {
- sqlite3MemMalloc,
- sqlite3MemFree,
- sqlite3MemRealloc,
- sqlite3MemSize,
- sqlite3MemRoundup,
- sqlite3MemInit,
- sqlite3MemShutdown,
- 0
- };
- sqlite3_config(SQLITE_CONFIG_MALLOC, &defaultMethods);
-}
-
-/*
-** Set the "type" of an allocation.
-*/
-SQLITE_PRIVATE void sqlite3MemdebugSetType(void *p, u8 eType){
- if( p && sqlite3GlobalConfig.m.xMalloc==sqlite3MemMalloc ){
- struct MemBlockHdr *pHdr;
- pHdr = sqlite3MemsysGetHeader(p);
- assert( pHdr->iForeGuard==FOREGUARD );
- pHdr->eType = eType;
- }
-}
-
-/*
-** Return TRUE if the mask of type in eType matches the type of the
-** allocation p. Also return true if p==NULL.
-**
-** This routine is designed for use within an assert() statement, to
-** verify the type of an allocation. For example:
-**
-** assert( sqlite3MemdebugHasType(p, MEMTYPE_HEAP) );
-*/
-SQLITE_PRIVATE int sqlite3MemdebugHasType(void *p, u8 eType){
- int rc = 1;
- if( p && sqlite3GlobalConfig.m.xMalloc==sqlite3MemMalloc ){
- struct MemBlockHdr *pHdr;
- pHdr = sqlite3MemsysGetHeader(p);
- assert( pHdr->iForeGuard==FOREGUARD ); /* Allocation is valid */
- if( (pHdr->eType&eType)==0 ){
- rc = 0;
- }
- }
- return rc;
-}
-
-/*
-** Return TRUE if the mask of type in eType matches no bits of the type of the
-** allocation p. Also return true if p==NULL.
-**
-** This routine is designed for use within an assert() statement, to
-** verify the type of an allocation. For example:
-**
-** assert( sqlite3MemdebugNoType(p, MEMTYPE_LOOKASIDE) );
-*/
-SQLITE_PRIVATE int sqlite3MemdebugNoType(void *p, u8 eType){
- int rc = 1;
- if( p && sqlite3GlobalConfig.m.xMalloc==sqlite3MemMalloc ){
- struct MemBlockHdr *pHdr;
- pHdr = sqlite3MemsysGetHeader(p);
- assert( pHdr->iForeGuard==FOREGUARD ); /* Allocation is valid */
- if( (pHdr->eType&eType)!=0 ){
- rc = 0;
- }
- }
- return rc;
-}
-
-/*
-** Set the number of backtrace levels kept for each allocation.
-** A value of zero turns off backtracing. The number is always rounded
-** up to a multiple of 2.
-*/
-SQLITE_PRIVATE void sqlite3MemdebugBacktrace(int depth){
- if( depth<0 ){ depth = 0; }
- if( depth>20 ){ depth = 20; }
- depth = (depth+1)&0xfe;
- mem.nBacktrace = depth;
-}
-
-SQLITE_PRIVATE void sqlite3MemdebugBacktraceCallback(void (*xBacktrace)(int, int, void **)){
- mem.xBacktrace = xBacktrace;
-}
-
-/*
-** Set the title string for subsequent allocations.
-*/
-SQLITE_PRIVATE void sqlite3MemdebugSettitle(const char *zTitle){
- unsigned int n = sqlite3Strlen30(zTitle) + 1;
- sqlite3_mutex_enter(mem.mutex);
- if( n>=sizeof(mem.zTitle) ) n = sizeof(mem.zTitle)-1;
- memcpy(mem.zTitle, zTitle, n);
- mem.zTitle[n] = 0;
- mem.nTitle = ROUND8(n);
- sqlite3_mutex_leave(mem.mutex);
-}
-
-SQLITE_PRIVATE void sqlite3MemdebugSync(){
- struct MemBlockHdr *pHdr;
- for(pHdr=mem.pFirst; pHdr; pHdr=pHdr->pNext){
- void **pBt = (void**)pHdr;
- pBt -= pHdr->nBacktraceSlots;
- mem.xBacktrace((int)pHdr->iSize, pHdr->nBacktrace-1, &pBt[1]);
- }
-}
-
-/*
-** Open the file indicated and write a log of all unfreed memory
-** allocations into that log.
-*/
-SQLITE_PRIVATE void sqlite3MemdebugDump(const char *zFilename){
- FILE *out;
- struct MemBlockHdr *pHdr;
- void **pBt;
- int i;
- out = fopen(zFilename, "w");
- if( out==0 ){
- fprintf(stderr, "** Unable to output memory debug output log: %s **\n",
- zFilename);
- return;
- }
- for(pHdr=mem.pFirst; pHdr; pHdr=pHdr->pNext){
- char *z = (char*)pHdr;
- z -= pHdr->nBacktraceSlots*sizeof(void*) + pHdr->nTitle;
- fprintf(out, "**** %lld bytes at %p from %s ****\n",
- pHdr->iSize, &pHdr[1], pHdr->nTitle ? z : "???");
- if( pHdr->nBacktrace ){
- fflush(out);
- pBt = (void**)pHdr;
- pBt -= pHdr->nBacktraceSlots;
- backtrace_symbols_fd(pBt, pHdr->nBacktrace, fileno(out));
- fprintf(out, "\n");
- }
- }
- fprintf(out, "COUNTS:\n");
- for(i=0; i=1 );
- size = mem3.aPool[i-1].u.hdr.size4x/4;
- assert( size==mem3.aPool[i+size-1].u.hdr.prevSize );
- assert( size>=2 );
- if( size <= MX_SMALL ){
- memsys3UnlinkFromList(i, &mem3.aiSmall[size-2]);
- }else{
- hash = size % N_HASH;
- memsys3UnlinkFromList(i, &mem3.aiHash[hash]);
- }
-}
-
-/*
-** Link the chunk at mem3.aPool[i] so that is on the list rooted
-** at *pRoot.
-*/
-static void memsys3LinkIntoList(u32 i, u32 *pRoot){
- assert( sqlite3_mutex_held(mem3.mutex) );
- mem3.aPool[i].u.list.next = *pRoot;
- mem3.aPool[i].u.list.prev = 0;
- if( *pRoot ){
- mem3.aPool[*pRoot].u.list.prev = i;
- }
- *pRoot = i;
-}
-
-/*
-** Link the chunk at index i into either the appropriate
-** small chunk list, or into the large chunk hash table.
-*/
-static void memsys3Link(u32 i){
- u32 size, hash;
- assert( sqlite3_mutex_held(mem3.mutex) );
- assert( i>=1 );
- assert( (mem3.aPool[i-1].u.hdr.size4x & 1)==0 );
- size = mem3.aPool[i-1].u.hdr.size4x/4;
- assert( size==mem3.aPool[i+size-1].u.hdr.prevSize );
- assert( size>=2 );
- if( size <= MX_SMALL ){
- memsys3LinkIntoList(i, &mem3.aiSmall[size-2]);
- }else{
- hash = size % N_HASH;
- memsys3LinkIntoList(i, &mem3.aiHash[hash]);
- }
-}
-
-/*
-** If the STATIC_MEM mutex is not already held, obtain it now. The mutex
-** will already be held (obtained by code in malloc.c) if
-** sqlite3GlobalConfig.bMemStat is true.
-*/
-static void memsys3Enter(void){
- if( sqlite3GlobalConfig.bMemstat==0 && mem3.mutex==0 ){
- mem3.mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MEM);
- }
- sqlite3_mutex_enter(mem3.mutex);
-}
-static void memsys3Leave(void){
- sqlite3_mutex_leave(mem3.mutex);
-}
-
-/*
-** Called when we are unable to satisfy an allocation of nBytes.
-*/
-static void memsys3OutOfMemory(int nByte){
- if( !mem3.alarmBusy ){
- mem3.alarmBusy = 1;
- assert( sqlite3_mutex_held(mem3.mutex) );
- sqlite3_mutex_leave(mem3.mutex);
- sqlite3_release_memory(nByte);
- sqlite3_mutex_enter(mem3.mutex);
- mem3.alarmBusy = 0;
- }
-}
-
-
-/*
-** Chunk i is a free chunk that has been unlinked. Adjust its
-** size parameters for check-out and return a pointer to the
-** user portion of the chunk.
-*/
-static void *memsys3Checkout(u32 i, u32 nBlock){
- u32 x;
- assert( sqlite3_mutex_held(mem3.mutex) );
- assert( i>=1 );
- assert( mem3.aPool[i-1].u.hdr.size4x/4==nBlock );
- assert( mem3.aPool[i+nBlock-1].u.hdr.prevSize==nBlock );
- x = mem3.aPool[i-1].u.hdr.size4x;
- mem3.aPool[i-1].u.hdr.size4x = nBlock*4 | 1 | (x&2);
- mem3.aPool[i+nBlock-1].u.hdr.prevSize = nBlock;
- mem3.aPool[i+nBlock-1].u.hdr.size4x |= 2;
- return &mem3.aPool[i];
-}
-
-/*
-** Carve a piece off of the end of the mem3.iMaster free chunk.
-** Return a pointer to the new allocation. Or, if the master chunk
-** is not large enough, return 0.
-*/
-static void *memsys3FromMaster(u32 nBlock){
- assert( sqlite3_mutex_held(mem3.mutex) );
- assert( mem3.szMaster>=nBlock );
- if( nBlock>=mem3.szMaster-1 ){
- /* Use the entire master */
- void *p = memsys3Checkout(mem3.iMaster, mem3.szMaster);
- mem3.iMaster = 0;
- mem3.szMaster = 0;
- mem3.mnMaster = 0;
- return p;
- }else{
- /* Split the master block. Return the tail. */
- u32 newi, x;
- newi = mem3.iMaster + mem3.szMaster - nBlock;
- assert( newi > mem3.iMaster+1 );
- mem3.aPool[mem3.iMaster+mem3.szMaster-1].u.hdr.prevSize = nBlock;
- mem3.aPool[mem3.iMaster+mem3.szMaster-1].u.hdr.size4x |= 2;
- mem3.aPool[newi-1].u.hdr.size4x = nBlock*4 + 1;
- mem3.szMaster -= nBlock;
- mem3.aPool[newi-1].u.hdr.prevSize = mem3.szMaster;
- x = mem3.aPool[mem3.iMaster-1].u.hdr.size4x & 2;
- mem3.aPool[mem3.iMaster-1].u.hdr.size4x = mem3.szMaster*4 | x;
- if( mem3.szMaster < mem3.mnMaster ){
- mem3.mnMaster = mem3.szMaster;
- }
- return (void*)&mem3.aPool[newi];
- }
-}
-
-/*
-** *pRoot is the head of a list of free chunks of the same size
-** or same size hash. In other words, *pRoot is an entry in either
-** mem3.aiSmall[] or mem3.aiHash[].
-**
-** This routine examines all entries on the given list and tries
-** to coalesce each entries with adjacent free chunks.
-**
-** If it sees a chunk that is larger than mem3.iMaster, it replaces
-** the current mem3.iMaster with the new larger chunk. In order for
-** this mem3.iMaster replacement to work, the master chunk must be
-** linked into the hash tables. That is not the normal state of
-** affairs, of course. The calling routine must link the master
-** chunk before invoking this routine, then must unlink the (possibly
-** changed) master chunk once this routine has finished.
-*/
-static void memsys3Merge(u32 *pRoot){
- u32 iNext, prev, size, i, x;
-
- assert( sqlite3_mutex_held(mem3.mutex) );
- for(i=*pRoot; i>0; i=iNext){
- iNext = mem3.aPool[i].u.list.next;
- size = mem3.aPool[i-1].u.hdr.size4x;
- assert( (size&1)==0 );
- if( (size&2)==0 ){
- memsys3UnlinkFromList(i, pRoot);
- assert( i > mem3.aPool[i-1].u.hdr.prevSize );
- prev = i - mem3.aPool[i-1].u.hdr.prevSize;
- if( prev==iNext ){
- iNext = mem3.aPool[prev].u.list.next;
- }
- memsys3Unlink(prev);
- size = i + size/4 - prev;
- x = mem3.aPool[prev-1].u.hdr.size4x & 2;
- mem3.aPool[prev-1].u.hdr.size4x = size*4 | x;
- mem3.aPool[prev+size-1].u.hdr.prevSize = size;
- memsys3Link(prev);
- i = prev;
- }else{
- size /= 4;
- }
- if( size>mem3.szMaster ){
- mem3.iMaster = i;
- mem3.szMaster = size;
- }
- }
-}
-
-/*
-** Return a block of memory of at least nBytes in size.
-** Return NULL if unable.
-**
-** This function assumes that the necessary mutexes, if any, are
-** already held by the caller. Hence "Unsafe".
-*/
-static void *memsys3MallocUnsafe(int nByte){
- u32 i;
- u32 nBlock;
- u32 toFree;
-
- assert( sqlite3_mutex_held(mem3.mutex) );
- assert( sizeof(Mem3Block)==8 );
- if( nByte<=12 ){
- nBlock = 2;
- }else{
- nBlock = (nByte + 11)/8;
- }
- assert( nBlock>=2 );
-
- /* STEP 1:
- ** Look for an entry of the correct size in either the small
- ** chunk table or in the large chunk hash table. This is
- ** successful most of the time (about 9 times out of 10).
- */
- if( nBlock <= MX_SMALL ){
- i = mem3.aiSmall[nBlock-2];
- if( i>0 ){
- memsys3UnlinkFromList(i, &mem3.aiSmall[nBlock-2]);
- return memsys3Checkout(i, nBlock);
- }
- }else{
- int hash = nBlock % N_HASH;
- for(i=mem3.aiHash[hash]; i>0; i=mem3.aPool[i].u.list.next){
- if( mem3.aPool[i-1].u.hdr.size4x/4==nBlock ){
- memsys3UnlinkFromList(i, &mem3.aiHash[hash]);
- return memsys3Checkout(i, nBlock);
- }
- }
- }
-
- /* STEP 2:
- ** Try to satisfy the allocation by carving a piece off of the end
- ** of the master chunk. This step usually works if step 1 fails.
- */
- if( mem3.szMaster>=nBlock ){
- return memsys3FromMaster(nBlock);
- }
-
-
- /* STEP 3:
- ** Loop through the entire memory pool. Coalesce adjacent free
- ** chunks. Recompute the master chunk as the largest free chunk.
- ** Then try again to satisfy the allocation by carving a piece off
- ** of the end of the master chunk. This step happens very
- ** rarely (we hope!)
- */
- for(toFree=nBlock*16; toFree<(mem3.nPool*16); toFree *= 2){
- memsys3OutOfMemory(toFree);
- if( mem3.iMaster ){
- memsys3Link(mem3.iMaster);
- mem3.iMaster = 0;
- mem3.szMaster = 0;
- }
- for(i=0; i=nBlock ){
- return memsys3FromMaster(nBlock);
- }
- }
- }
-
- /* If none of the above worked, then we fail. */
- return 0;
-}
-
-/*
-** Free an outstanding memory allocation.
-**
-** This function assumes that the necessary mutexes, if any, are
-** already held by the caller. Hence "Unsafe".
-*/
-static void memsys3FreeUnsafe(void *pOld){
- Mem3Block *p = (Mem3Block*)pOld;
- int i;
- u32 size, x;
- assert( sqlite3_mutex_held(mem3.mutex) );
- assert( p>mem3.aPool && p<&mem3.aPool[mem3.nPool] );
- i = p - mem3.aPool;
- assert( (mem3.aPool[i-1].u.hdr.size4x&1)==1 );
- size = mem3.aPool[i-1].u.hdr.size4x/4;
- assert( i+size<=mem3.nPool+1 );
- mem3.aPool[i-1].u.hdr.size4x &= ~1;
- mem3.aPool[i+size-1].u.hdr.prevSize = size;
- mem3.aPool[i+size-1].u.hdr.size4x &= ~2;
- memsys3Link(i);
-
- /* Try to expand the master using the newly freed chunk */
- if( mem3.iMaster ){
- while( (mem3.aPool[mem3.iMaster-1].u.hdr.size4x&2)==0 ){
- size = mem3.aPool[mem3.iMaster-1].u.hdr.prevSize;
- mem3.iMaster -= size;
- mem3.szMaster += size;
- memsys3Unlink(mem3.iMaster);
- x = mem3.aPool[mem3.iMaster-1].u.hdr.size4x & 2;
- mem3.aPool[mem3.iMaster-1].u.hdr.size4x = mem3.szMaster*4 | x;
- mem3.aPool[mem3.iMaster+mem3.szMaster-1].u.hdr.prevSize = mem3.szMaster;
- }
- x = mem3.aPool[mem3.iMaster-1].u.hdr.size4x & 2;
- while( (mem3.aPool[mem3.iMaster+mem3.szMaster-1].u.hdr.size4x&1)==0 ){
- memsys3Unlink(mem3.iMaster+mem3.szMaster);
- mem3.szMaster += mem3.aPool[mem3.iMaster+mem3.szMaster-1].u.hdr.size4x/4;
- mem3.aPool[mem3.iMaster-1].u.hdr.size4x = mem3.szMaster*4 | x;
- mem3.aPool[mem3.iMaster+mem3.szMaster-1].u.hdr.prevSize = mem3.szMaster;
- }
- }
-}
-
-/*
-** Return the size of an outstanding allocation, in bytes. The
-** size returned omits the 8-byte header overhead. This only
-** works for chunks that are currently checked out.
-*/
-static int memsys3Size(void *p){
- Mem3Block *pBlock;
- assert( p!=0 );
- pBlock = (Mem3Block*)p;
- assert( (pBlock[-1].u.hdr.size4x&1)!=0 );
- return (pBlock[-1].u.hdr.size4x&~3)*2 - 4;
-}
-
-/*
-** Round up a request size to the next valid allocation size.
-*/
-static int memsys3Roundup(int n){
- if( n<=12 ){
- return 12;
- }else{
- return ((n+11)&~7) - 4;
- }
-}
-
-/*
-** Allocate nBytes of memory.
-*/
-static void *memsys3Malloc(int nBytes){
- sqlite3_int64 *p;
- assert( nBytes>0 ); /* malloc.c filters out 0 byte requests */
- memsys3Enter();
- p = memsys3MallocUnsafe(nBytes);
- memsys3Leave();
- return (void*)p;
-}
-
-/*
-** Free memory.
-*/
-static void memsys3Free(void *pPrior){
- assert( pPrior );
- memsys3Enter();
- memsys3FreeUnsafe(pPrior);
- memsys3Leave();
-}
-
-/*
-** Change the size of an existing memory allocation
-*/
-static void *memsys3Realloc(void *pPrior, int nBytes){
- int nOld;
- void *p;
- if( pPrior==0 ){
- return sqlite3_malloc(nBytes);
- }
- if( nBytes<=0 ){
- sqlite3_free(pPrior);
- return 0;
- }
- nOld = memsys3Size(pPrior);
- if( nBytes<=nOld && nBytes>=nOld-128 ){
- return pPrior;
- }
- memsys3Enter();
- p = memsys3MallocUnsafe(nBytes);
- if( p ){
- if( nOld>1)!=(size&1) ){
- fprintf(out, "%p tail checkout bit is incorrect\n", &mem3.aPool[i]);
- assert( 0 );
- break;
- }
- if( size&1 ){
- fprintf(out, "%p %6d bytes checked out\n", &mem3.aPool[i], (size/4)*8-8);
- }else{
- fprintf(out, "%p %6d bytes free%s\n", &mem3.aPool[i], (size/4)*8-8,
- i==mem3.iMaster ? " **master**" : "");
- }
- }
- for(i=0; i0; j=mem3.aPool[j].u.list.next){
- fprintf(out, " %p(%d)", &mem3.aPool[j],
- (mem3.aPool[j-1].u.hdr.size4x/4)*8-8);
- }
- fprintf(out, "\n");
- }
- for(i=0; i0; j=mem3.aPool[j].u.list.next){
- fprintf(out, " %p(%d)", &mem3.aPool[j],
- (mem3.aPool[j-1].u.hdr.size4x/4)*8-8);
- }
- fprintf(out, "\n");
- }
- fprintf(out, "master=%d\n", mem3.iMaster);
- fprintf(out, "nowUsed=%d\n", mem3.nPool*8 - mem3.szMaster*8);
- fprintf(out, "mxUsed=%d\n", mem3.nPool*8 - mem3.mnMaster*8);
- sqlite3_mutex_leave(mem3.mutex);
- if( out==stdout ){
- fflush(stdout);
- }else{
- fclose(out);
- }
-#else
- UNUSED_PARAMETER(zFilename);
-#endif
-}
-
-/*
-** This routine is the only routine in this file with external
-** linkage.
-**
-** Populate the low-level memory allocation function pointers in
-** sqlite3GlobalConfig.m with pointers to the routines in this file. The
-** arguments specify the block of memory to manage.
-**
-** This routine is only called by sqlite3_config(), and therefore
-** is not required to be threadsafe (it is not).
-*/
-SQLITE_PRIVATE const sqlite3_mem_methods *sqlite3MemGetMemsys3(void){
- static const sqlite3_mem_methods mempoolMethods = {
- memsys3Malloc,
- memsys3Free,
- memsys3Realloc,
- memsys3Size,
- memsys3Roundup,
- memsys3Init,
- memsys3Shutdown,
- 0
- };
- return &mempoolMethods;
-}
-
-#endif /* SQLITE_ENABLE_MEMSYS3 */
-
-/************** End of mem3.c ************************************************/
-/************** Begin file mem5.c ********************************************/
-/*
-** 2007 October 14
-**
-** The author disclaims copyright to this source code. In place of
-** a legal notice, here is a blessing:
-**
-** May you do good and not evil.
-** May you find forgiveness for yourself and forgive others.
-** May you share freely, never taking more than you give.
-**
-*************************************************************************
-** This file contains the C functions that implement a memory
-** allocation subsystem for use by SQLite.
-**
-** This version of the memory allocation subsystem omits all
-** use of malloc(). The application gives SQLite a block of memory
-** before calling sqlite3_initialize() from which allocations
-** are made and returned by the xMalloc() and xRealloc()
-** implementations. Once sqlite3_initialize() has been called,
-** the amount of memory available to SQLite is fixed and cannot
-** be changed.
-**
-** This version of the memory allocation subsystem is included
-** in the build only if SQLITE_ENABLE_MEMSYS5 is defined.
-**
-** This memory allocator uses the following algorithm:
-**
-** 1. All memory allocation sizes are rounded up to a power of 2.
-**
-** 2. If two adjacent free blocks are the halves of a larger block,
-** then the two blocks are coalesced into the single larger block.
-**
-** 3. New memory is allocated from the first available free block.
-**
-** This algorithm is described in: J. M. Robson. "Bounds for Some Functions
-** Concerning Dynamic Storage Allocation". Journal of the Association for
-** Computing Machinery, Volume 21, Number 8, July 1974, pages 491-499.
-**
-** Let n be the size of the largest allocation divided by the minimum
-** allocation size (after rounding all sizes up to a power of 2.) Let M
-** be the maximum amount of memory ever outstanding at one time. Let
-** N be the total amount of memory available for allocation. Robson
-** proved that this memory allocator will never breakdown due to
-** fragmentation as long as the following constraint holds:
-**
-** N >= M*(1 + log2(n)/2) - n + 1
-**
-** The sqlite3_status() logic tracks the maximum values of n and M so
-** that an application can, at any time, verify this constraint.
-*/
-/* #include "sqliteInt.h" */
-
-/*
-** This version of the memory allocator is used only when
-** SQLITE_ENABLE_MEMSYS5 is defined.
-*/
-#ifdef SQLITE_ENABLE_MEMSYS5
-
-/*
-** A minimum allocation is an instance of the following structure.
-** Larger allocations are an array of these structures where the
-** size of the array is a power of 2.
-**
-** The size of this object must be a power of two. That fact is
-** verified in memsys5Init().
-*/
-typedef struct Mem5Link Mem5Link;
-struct Mem5Link {
- int next; /* Index of next free chunk */
- int prev; /* Index of previous free chunk */
-};
-
-/*
-** Maximum size of any allocation is ((1<=0 && i=0 && iLogsize<=LOGMAX );
- assert( (mem5.aCtrl[i] & CTRL_LOGSIZE)==iLogsize );
-
- next = MEM5LINK(i)->next;
- prev = MEM5LINK(i)->prev;
- if( prev<0 ){
- mem5.aiFreelist[iLogsize] = next;
- }else{
- MEM5LINK(prev)->next = next;
- }
- if( next>=0 ){
- MEM5LINK(next)->prev = prev;
- }
-}
-
-/*
-** Link the chunk at mem5.aPool[i] so that is on the iLogsize
-** free list.
-*/
-static void memsys5Link(int i, int iLogsize){
- int x;
- assert( sqlite3_mutex_held(mem5.mutex) );
- assert( i>=0 && i=0 && iLogsize<=LOGMAX );
- assert( (mem5.aCtrl[i] & CTRL_LOGSIZE)==iLogsize );
-
- x = MEM5LINK(i)->next = mem5.aiFreelist[iLogsize];
- MEM5LINK(i)->prev = -1;
- if( x>=0 ){
- assert( xprev = i;
- }
- mem5.aiFreelist[iLogsize] = i;
-}
-
-/*
-** Obtain or release the mutex needed to access global data structures.
-*/
-static void memsys5Enter(void){
- sqlite3_mutex_enter(mem5.mutex);
-}
-static void memsys5Leave(void){
- sqlite3_mutex_leave(mem5.mutex);
-}
-
-/*
-** Return the size of an outstanding allocation, in bytes.
-** This only works for chunks that are currently checked out.
-*/
-static int memsys5Size(void *p){
- int iSize, i;
- assert( p!=0 );
- i = (int)(((u8 *)p-mem5.zPool)/mem5.szAtom);
- assert( i>=0 && i0 );
-
- /* No more than 1GiB per allocation */
- if( nByte > 0x40000000 ) return 0;
-
-#if defined(SQLITE_DEBUG) || defined(SQLITE_TEST)
- /* Keep track of the maximum allocation request. Even unfulfilled
- ** requests are counted */
- if( (u32)nByte>mem5.maxRequest ){
- mem5.maxRequest = nByte;
- }
-#endif
-
-
- /* Round nByte up to the next valid power of two */
- for(iFullSz=mem5.szAtom,iLogsize=0; iFullSzLOGMAX ){
- testcase( sqlite3GlobalConfig.xLog!=0 );
- sqlite3_log(SQLITE_NOMEM, "failed to allocate %u bytes", nByte);
- return 0;
- }
- i = mem5.aiFreelist[iBin];
- memsys5Unlink(i, iBin);
- while( iBin>iLogsize ){
- int newSize;
-
- iBin--;
- newSize = 1 << iBin;
- mem5.aCtrl[i+newSize] = CTRL_FREE | iBin;
- memsys5Link(i+newSize, iBin);
- }
- mem5.aCtrl[i] = iLogsize;
-
-#if defined(SQLITE_DEBUG) || defined(SQLITE_TEST)
- /* Update allocator performance statistics. */
- mem5.nAlloc++;
- mem5.totalAlloc += iFullSz;
- mem5.totalExcess += iFullSz - nByte;
- mem5.currentCount++;
- mem5.currentOut += iFullSz;
- if( mem5.maxCount=0 && iBlock0 );
- assert( mem5.currentOut>=(size*mem5.szAtom) );
- mem5.currentCount--;
- mem5.currentOut -= size*mem5.szAtom;
- assert( mem5.currentOut>0 || mem5.currentCount==0 );
- assert( mem5.currentCount>0 || mem5.currentOut==0 );
-#endif
-
- mem5.aCtrl[iBlock] = CTRL_FREE | iLogsize;
- while( ALWAYS(iLogsize>iLogsize) & 1 ){
- iBuddy = iBlock - size;
- assert( iBuddy>=0 );
- }else{
- iBuddy = iBlock + size;
- if( iBuddy>=mem5.nBlock ) break;
- }
- if( mem5.aCtrl[iBuddy]!=(CTRL_FREE | iLogsize) ) break;
- memsys5Unlink(iBuddy, iLogsize);
- iLogsize++;
- if( iBuddy0 ){
- memsys5Enter();
- p = memsys5MallocUnsafe(nBytes);
- memsys5Leave();
- }
- return (void*)p;
-}
-
-/*
-** Free memory.
-**
-** The outer layer memory allocator prevents this routine from
-** being called with pPrior==0.
-*/
-static void memsys5Free(void *pPrior){
- assert( pPrior!=0 );
- memsys5Enter();
- memsys5FreeUnsafe(pPrior);
- memsys5Leave();
-}
-
-/*
-** Change the size of an existing memory allocation.
-**
-** The outer layer memory allocator prevents this routine from
-** being called with pPrior==0.
-**
-** nBytes is always a value obtained from a prior call to
-** memsys5Round(). Hence nBytes is always a non-negative power
-** of two. If nBytes==0 that means that an oversize allocation
-** (an allocation larger than 0x40000000) was requested and this
-** routine should return 0 without freeing pPrior.
-*/
-static void *memsys5Realloc(void *pPrior, int nBytes){
- int nOld;
- void *p;
- assert( pPrior!=0 );
- assert( (nBytes&(nBytes-1))==0 ); /* EV: R-46199-30249 */
- assert( nBytes>=0 );
- if( nBytes==0 ){
- return 0;
- }
- nOld = memsys5Size(pPrior);
- if( nBytes<=nOld ){
- return pPrior;
- }
- p = memsys5Malloc(nBytes);
- if( p ){
- memcpy(p, pPrior, nOld);
- memsys5Free(pPrior);
- }
- return p;
-}
-
-/*
-** Round up a request size to the next valid allocation size. If
-** the allocation is too large to be handled by this allocation system,
-** return 0.
-**
-** All allocations must be a power of two and must be expressed by a
-** 32-bit signed integer. Hence the largest allocation is 0x40000000
-** or 1073741824 bytes.
-*/
-static int memsys5Roundup(int n){
- int iFullSz;
- if( n > 0x40000000 ) return 0;
- for(iFullSz=mem5.szAtom; iFullSz 0
-** memsys5Log(2) -> 1
-** memsys5Log(4) -> 2
-** memsys5Log(5) -> 3
-** memsys5Log(8) -> 3
-** memsys5Log(9) -> 4
-*/
-static int memsys5Log(int iValue){
- int iLog;
- for(iLog=0; (iLog<(int)((sizeof(int)*8)-1)) && (1<mem5.szAtom ){
- mem5.szAtom = mem5.szAtom << 1;
- }
-
- mem5.nBlock = (nByte / (mem5.szAtom+sizeof(u8)));
- mem5.zPool = zByte;
- mem5.aCtrl = (u8 *)&mem5.zPool[mem5.nBlock*mem5.szAtom];
-
- for(ii=0; ii<=LOGMAX; ii++){
- mem5.aiFreelist[ii] = -1;
- }
-
- iOffset = 0;
- for(ii=LOGMAX; ii>=0; ii--){
- int nAlloc = (1<mem5.nBlock);
- }
-
- /* If a mutex is required for normal operation, allocate one */
- if( sqlite3GlobalConfig.bMemstat==0 ){
- mem5.mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MEM);
- }
-
- return SQLITE_OK;
-}
-
-/*
-** Deinitialize this module.
-*/
-static void memsys5Shutdown(void *NotUsed){
- UNUSED_PARAMETER(NotUsed);
- mem5.mutex = 0;
- return;
-}
-
-#ifdef SQLITE_TEST
-/*
-** Open the file indicated and write a log of all unfreed memory
-** allocations into that log.
-*/
-SQLITE_PRIVATE void sqlite3Memsys5Dump(const char *zFilename){
- FILE *out;
- int i, j, n;
- int nMinLog;
-
- if( zFilename==0 || zFilename[0]==0 ){
- out = stdout;
- }else{
- out = fopen(zFilename, "w");
- if( out==0 ){
- fprintf(stderr, "** Unable to output memory debug output log: %s **\n",
- zFilename);
- return;
- }
- }
- memsys5Enter();
- nMinLog = memsys5Log(mem5.szAtom);
- for(i=0; i<=LOGMAX && i+nMinLog<32; i++){
- for(n=0, j=mem5.aiFreelist[i]; j>=0; j = MEM5LINK(j)->next, n++){}
- fprintf(out, "freelist items of size %d: %d\n", mem5.szAtom << i, n);
- }
- fprintf(out, "mem5.nAlloc = %llu\n", mem5.nAlloc);
- fprintf(out, "mem5.totalAlloc = %llu\n", mem5.totalAlloc);
- fprintf(out, "mem5.totalExcess = %llu\n", mem5.totalExcess);
- fprintf(out, "mem5.currentOut = %u\n", mem5.currentOut);
- fprintf(out, "mem5.currentCount = %u\n", mem5.currentCount);
- fprintf(out, "mem5.maxOut = %u\n", mem5.maxOut);
- fprintf(out, "mem5.maxCount = %u\n", mem5.maxCount);
- fprintf(out, "mem5.maxRequest = %u\n", mem5.maxRequest);
- memsys5Leave();
- if( out==stdout ){
- fflush(stdout);
- }else{
- fclose(out);
- }
-}
-#endif
-
-/*
-** This routine is the only routine in this file with external
-** linkage. It returns a pointer to a static sqlite3_mem_methods
-** struct populated with the memsys5 methods.
-*/
-SQLITE_PRIVATE const sqlite3_mem_methods *sqlite3MemGetMemsys5(void){
- static const sqlite3_mem_methods memsys5Methods = {
- memsys5Malloc,
- memsys5Free,
- memsys5Realloc,
- memsys5Size,
- memsys5Roundup,
- memsys5Init,
- memsys5Shutdown,
- 0
- };
- return &memsys5Methods;
-}
-
-#endif /* SQLITE_ENABLE_MEMSYS5 */
-
-/************** End of mem5.c ************************************************/
-/************** Begin file mutex.c *******************************************/
-/*
-** 2007 August 14
-**
-** The author disclaims copyright to this source code. In place of
-** a legal notice, here is a blessing:
-**
-** May you do good and not evil.
-** May you find forgiveness for yourself and forgive others.
-** May you share freely, never taking more than you give.
-**
-*************************************************************************
-** This file contains the C functions that implement mutexes.
-**
-** This file contains code that is common across all mutex implementations.
-*/
-/* #include "sqliteInt.h" */
-
-#if defined(SQLITE_DEBUG) && !defined(SQLITE_MUTEX_OMIT)
-/*
-** For debugging purposes, record when the mutex subsystem is initialized
-** and uninitialized so that we can assert() if there is an attempt to
-** allocate a mutex while the system is uninitialized.
-*/
-static SQLITE_WSD int mutexIsInit = 0;
-#endif /* SQLITE_DEBUG && !defined(SQLITE_MUTEX_OMIT) */
-
-
-#ifndef SQLITE_MUTEX_OMIT
-/*
-** Initialize the mutex system.
-*/
-SQLITE_PRIVATE int sqlite3MutexInit(void){
- int rc = SQLITE_OK;
- if( !sqlite3GlobalConfig.mutex.xMutexAlloc ){
- /* If the xMutexAlloc method has not been set, then the user did not
- ** install a mutex implementation via sqlite3_config() prior to
- ** sqlite3_initialize() being called. This block copies pointers to
- ** the default implementation into the sqlite3GlobalConfig structure.
- */
- sqlite3_mutex_methods const *pFrom;
- sqlite3_mutex_methods *pTo = &sqlite3GlobalConfig.mutex;
-
- if( sqlite3GlobalConfig.bCoreMutex ){
- pFrom = sqlite3DefaultMutex();
- }else{
- pFrom = sqlite3NoopMutex();
- }
- pTo->xMutexInit = pFrom->xMutexInit;
- pTo->xMutexEnd = pFrom->xMutexEnd;
- pTo->xMutexFree = pFrom->xMutexFree;
- pTo->xMutexEnter = pFrom->xMutexEnter;
- pTo->xMutexTry = pFrom->xMutexTry;
- pTo->xMutexLeave = pFrom->xMutexLeave;
- pTo->xMutexHeld = pFrom->xMutexHeld;
- pTo->xMutexNotheld = pFrom->xMutexNotheld;
- sqlite3MemoryBarrier();
- pTo->xMutexAlloc = pFrom->xMutexAlloc;
- }
- assert( sqlite3GlobalConfig.mutex.xMutexInit );
- rc = sqlite3GlobalConfig.mutex.xMutexInit();
-
-#ifdef SQLITE_DEBUG
- GLOBAL(int, mutexIsInit) = 1;
-#endif
-
- return rc;
-}
-
-/*
-** Shutdown the mutex system. This call frees resources allocated by
-** sqlite3MutexInit().
-*/
-SQLITE_PRIVATE int sqlite3MutexEnd(void){
- int rc = SQLITE_OK;
- if( sqlite3GlobalConfig.mutex.xMutexEnd ){
- rc = sqlite3GlobalConfig.mutex.xMutexEnd();
- }
-
-#ifdef SQLITE_DEBUG
- GLOBAL(int, mutexIsInit) = 0;
-#endif
-
- return rc;
-}
-
-/*
-** Retrieve a pointer to a static mutex or allocate a new dynamic one.
-*/
-SQLITE_API sqlite3_mutex *SQLITE_STDCALL sqlite3_mutex_alloc(int id){
-#ifndef SQLITE_OMIT_AUTOINIT
- if( id<=SQLITE_MUTEX_RECURSIVE && sqlite3_initialize() ) return 0;
- if( id>SQLITE_MUTEX_RECURSIVE && sqlite3MutexInit() ) return 0;
-#endif
- assert( sqlite3GlobalConfig.mutex.xMutexAlloc );
- return sqlite3GlobalConfig.mutex.xMutexAlloc(id);
-}
-
-SQLITE_PRIVATE sqlite3_mutex *sqlite3MutexAlloc(int id){
- if( !sqlite3GlobalConfig.bCoreMutex ){
- return 0;
- }
- assert( GLOBAL(int, mutexIsInit) );
- assert( sqlite3GlobalConfig.mutex.xMutexAlloc );
- return sqlite3GlobalConfig.mutex.xMutexAlloc(id);
-}
-
-/*
-** Free a dynamic mutex.
-*/
-SQLITE_API void SQLITE_STDCALL sqlite3_mutex_free(sqlite3_mutex *p){
- if( p ){
- assert( sqlite3GlobalConfig.mutex.xMutexFree );
- sqlite3GlobalConfig.mutex.xMutexFree(p);
- }
-}
-
-/*
-** Obtain the mutex p. If some other thread already has the mutex, block
-** until it can be obtained.
-*/
-SQLITE_API void SQLITE_STDCALL sqlite3_mutex_enter(sqlite3_mutex *p){
- if( p ){
- assert( sqlite3GlobalConfig.mutex.xMutexEnter );
- sqlite3GlobalConfig.mutex.xMutexEnter(p);
- }
-}
-
-/*
-** Obtain the mutex p. If successful, return SQLITE_OK. Otherwise, if another
-** thread holds the mutex and it cannot be obtained, return SQLITE_BUSY.
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_mutex_try(sqlite3_mutex *p){
- int rc = SQLITE_OK;
- if( p ){
- assert( sqlite3GlobalConfig.mutex.xMutexTry );
- return sqlite3GlobalConfig.mutex.xMutexTry(p);
- }
- return rc;
-}
-
-/*
-** The sqlite3_mutex_leave() routine exits a mutex that was previously
-** entered by the same thread. The behavior is undefined if the mutex
-** is not currently entered. If a NULL pointer is passed as an argument
-** this function is a no-op.
-*/
-SQLITE_API void SQLITE_STDCALL sqlite3_mutex_leave(sqlite3_mutex *p){
- if( p ){
- assert( sqlite3GlobalConfig.mutex.xMutexLeave );
- sqlite3GlobalConfig.mutex.xMutexLeave(p);
- }
-}
-
-#ifndef NDEBUG
-/*
-** The sqlite3_mutex_held() and sqlite3_mutex_notheld() routine are
-** intended for use inside assert() statements.
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_mutex_held(sqlite3_mutex *p){
- assert( p==0 || sqlite3GlobalConfig.mutex.xMutexHeld );
- return p==0 || sqlite3GlobalConfig.mutex.xMutexHeld(p);
-}
-SQLITE_API int SQLITE_STDCALL sqlite3_mutex_notheld(sqlite3_mutex *p){
- assert( p==0 || sqlite3GlobalConfig.mutex.xMutexNotheld );
- return p==0 || sqlite3GlobalConfig.mutex.xMutexNotheld(p);
-}
-#endif
-
-#endif /* !defined(SQLITE_MUTEX_OMIT) */
-
-/************** End of mutex.c ***********************************************/
-/************** Begin file mutex_noop.c **************************************/
-/*
-** 2008 October 07
-**
-** The author disclaims copyright to this source code. In place of
-** a legal notice, here is a blessing:
-**
-** May you do good and not evil.
-** May you find forgiveness for yourself and forgive others.
-** May you share freely, never taking more than you give.
-**
-*************************************************************************
-** This file contains the C functions that implement mutexes.
-**
-** This implementation in this file does not provide any mutual
-** exclusion and is thus suitable for use only in applications
-** that use SQLite in a single thread. The routines defined
-** here are place-holders. Applications can substitute working
-** mutex routines at start-time using the
-**
-** sqlite3_config(SQLITE_CONFIG_MUTEX,...)
-**
-** interface.
-**
-** If compiled with SQLITE_DEBUG, then additional logic is inserted
-** that does error checking on mutexes to make sure they are being
-** called correctly.
-*/
-/* #include "sqliteInt.h" */
-
-#ifndef SQLITE_MUTEX_OMIT
-
-#ifndef SQLITE_DEBUG
-/*
-** Stub routines for all mutex methods.
-**
-** This routines provide no mutual exclusion or error checking.
-*/
-static int noopMutexInit(void){ return SQLITE_OK; }
-static int noopMutexEnd(void){ return SQLITE_OK; }
-static sqlite3_mutex *noopMutexAlloc(int id){
- UNUSED_PARAMETER(id);
- return (sqlite3_mutex*)8;
-}
-static void noopMutexFree(sqlite3_mutex *p){ UNUSED_PARAMETER(p); return; }
-static void noopMutexEnter(sqlite3_mutex *p){ UNUSED_PARAMETER(p); return; }
-static int noopMutexTry(sqlite3_mutex *p){
- UNUSED_PARAMETER(p);
- return SQLITE_OK;
-}
-static void noopMutexLeave(sqlite3_mutex *p){ UNUSED_PARAMETER(p); return; }
-
-SQLITE_PRIVATE sqlite3_mutex_methods const *sqlite3NoopMutex(void){
- static const sqlite3_mutex_methods sMutex = {
- noopMutexInit,
- noopMutexEnd,
- noopMutexAlloc,
- noopMutexFree,
- noopMutexEnter,
- noopMutexTry,
- noopMutexLeave,
-
- 0,
- 0,
- };
-
- return &sMutex;
-}
-#endif /* !SQLITE_DEBUG */
-
-#ifdef SQLITE_DEBUG
-/*
-** In this implementation, error checking is provided for testing
-** and debugging purposes. The mutexes still do not provide any
-** mutual exclusion.
-*/
-
-/*
-** The mutex object
-*/
-typedef struct sqlite3_debug_mutex {
- int id; /* The mutex type */
- int cnt; /* Number of entries without a matching leave */
-} sqlite3_debug_mutex;
-
-/*
-** The sqlite3_mutex_held() and sqlite3_mutex_notheld() routine are
-** intended for use inside assert() statements.
-*/
-static int debugMutexHeld(sqlite3_mutex *pX){
- sqlite3_debug_mutex *p = (sqlite3_debug_mutex*)pX;
- return p==0 || p->cnt>0;
-}
-static int debugMutexNotheld(sqlite3_mutex *pX){
- sqlite3_debug_mutex *p = (sqlite3_debug_mutex*)pX;
- return p==0 || p->cnt==0;
-}
-
-/*
-** Initialize and deinitialize the mutex subsystem.
-*/
-static int debugMutexInit(void){ return SQLITE_OK; }
-static int debugMutexEnd(void){ return SQLITE_OK; }
-
-/*
-** The sqlite3_mutex_alloc() routine allocates a new
-** mutex and returns a pointer to it. If it returns NULL
-** that means that a mutex could not be allocated.
-*/
-static sqlite3_mutex *debugMutexAlloc(int id){
- static sqlite3_debug_mutex aStatic[SQLITE_MUTEX_STATIC_VFS3 - 1];
- sqlite3_debug_mutex *pNew = 0;
- switch( id ){
- case SQLITE_MUTEX_FAST:
- case SQLITE_MUTEX_RECURSIVE: {
- pNew = sqlite3Malloc(sizeof(*pNew));
- if( pNew ){
- pNew->id = id;
- pNew->cnt = 0;
- }
- break;
- }
- default: {
-#ifdef SQLITE_ENABLE_API_ARMOR
- if( id-2<0 || id-2>=ArraySize(aStatic) ){
- (void)SQLITE_MISUSE_BKPT;
- return 0;
- }
-#endif
- pNew = &aStatic[id-2];
- pNew->id = id;
- break;
- }
- }
- return (sqlite3_mutex*)pNew;
-}
-
-/*
-** This routine deallocates a previously allocated mutex.
-*/
-static void debugMutexFree(sqlite3_mutex *pX){
- sqlite3_debug_mutex *p = (sqlite3_debug_mutex*)pX;
- assert( p->cnt==0 );
- if( p->id==SQLITE_MUTEX_RECURSIVE || p->id==SQLITE_MUTEX_FAST ){
- sqlite3_free(p);
- }else{
-#ifdef SQLITE_ENABLE_API_ARMOR
- (void)SQLITE_MISUSE_BKPT;
-#endif
- }
-}
-
-/*
-** The sqlite3_mutex_enter() and sqlite3_mutex_try() routines attempt
-** to enter a mutex. If another thread is already within the mutex,
-** sqlite3_mutex_enter() will block and sqlite3_mutex_try() will return
-** SQLITE_BUSY. The sqlite3_mutex_try() interface returns SQLITE_OK
-** upon successful entry. Mutexes created using SQLITE_MUTEX_RECURSIVE can
-** be entered multiple times by the same thread. In such cases the,
-** mutex must be exited an equal number of times before another thread
-** can enter. If the same thread tries to enter any other kind of mutex
-** more than once, the behavior is undefined.
-*/
-static void debugMutexEnter(sqlite3_mutex *pX){
- sqlite3_debug_mutex *p = (sqlite3_debug_mutex*)pX;
- assert( p->id==SQLITE_MUTEX_RECURSIVE || debugMutexNotheld(pX) );
- p->cnt++;
-}
-static int debugMutexTry(sqlite3_mutex *pX){
- sqlite3_debug_mutex *p = (sqlite3_debug_mutex*)pX;
- assert( p->id==SQLITE_MUTEX_RECURSIVE || debugMutexNotheld(pX) );
- p->cnt++;
- return SQLITE_OK;
-}
-
-/*
-** The sqlite3_mutex_leave() routine exits a mutex that was
-** previously entered by the same thread. The behavior
-** is undefined if the mutex is not currently entered or
-** is not currently allocated. SQLite will never do either.
-*/
-static void debugMutexLeave(sqlite3_mutex *pX){
- sqlite3_debug_mutex *p = (sqlite3_debug_mutex*)pX;
- assert( debugMutexHeld(pX) );
- p->cnt--;
- assert( p->id==SQLITE_MUTEX_RECURSIVE || debugMutexNotheld(pX) );
-}
-
-SQLITE_PRIVATE sqlite3_mutex_methods const *sqlite3NoopMutex(void){
- static const sqlite3_mutex_methods sMutex = {
- debugMutexInit,
- debugMutexEnd,
- debugMutexAlloc,
- debugMutexFree,
- debugMutexEnter,
- debugMutexTry,
- debugMutexLeave,
-
- debugMutexHeld,
- debugMutexNotheld
- };
-
- return &sMutex;
-}
-#endif /* SQLITE_DEBUG */
-
-/*
-** If compiled with SQLITE_MUTEX_NOOP, then the no-op mutex implementation
-** is used regardless of the run-time threadsafety setting.
-*/
-#ifdef SQLITE_MUTEX_NOOP
-SQLITE_PRIVATE sqlite3_mutex_methods const *sqlite3DefaultMutex(void){
- return sqlite3NoopMutex();
-}
-#endif /* defined(SQLITE_MUTEX_NOOP) */
-#endif /* !defined(SQLITE_MUTEX_OMIT) */
-
-/************** End of mutex_noop.c ******************************************/
-/************** Begin file mutex_unix.c **************************************/
-/*
-** 2007 August 28
-**
-** The author disclaims copyright to this source code. In place of
-** a legal notice, here is a blessing:
-**
-** May you do good and not evil.
-** May you find forgiveness for yourself and forgive others.
-** May you share freely, never taking more than you give.
-**
-*************************************************************************
-** This file contains the C functions that implement mutexes for pthreads
-*/
-/* #include "sqliteInt.h" */
-
-/*
-** The code in this file is only used if we are compiling threadsafe
-** under unix with pthreads.
-**
-** Note that this implementation requires a version of pthreads that
-** supports recursive mutexes.
-*/
-#ifdef SQLITE_MUTEX_PTHREADS
-
-#include
-
-/*
-** The sqlite3_mutex.id, sqlite3_mutex.nRef, and sqlite3_mutex.owner fields
-** are necessary under two condidtions: (1) Debug builds and (2) using
-** home-grown mutexes. Encapsulate these conditions into a single #define.
-*/
-#if defined(SQLITE_DEBUG) || defined(SQLITE_HOMEGROWN_RECURSIVE_MUTEX)
-# define SQLITE_MUTEX_NREF 1
-#else
-# define SQLITE_MUTEX_NREF 0
-#endif
-
-/*
-** Each recursive mutex is an instance of the following structure.
-*/
-struct sqlite3_mutex {
- pthread_mutex_t mutex; /* Mutex controlling the lock */
-#if SQLITE_MUTEX_NREF || defined(SQLITE_ENABLE_API_ARMOR)
- int id; /* Mutex type */
-#endif
-#if SQLITE_MUTEX_NREF
- volatile int nRef; /* Number of entrances */
- volatile pthread_t owner; /* Thread that is within this mutex */
- int trace; /* True to trace changes */
-#endif
-};
-#if SQLITE_MUTEX_NREF
-#define SQLITE3_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER,0,0,(pthread_t)0,0}
-#elif defined(SQLITE_ENABLE_API_ARMOR)
-#define SQLITE3_MUTEX_INITIALIZER { PTHREAD_MUTEX_INITIALIZER, 0 }
-#else
-#define SQLITE3_MUTEX_INITIALIZER { PTHREAD_MUTEX_INITIALIZER }
-#endif
-
-/*
-** The sqlite3_mutex_held() and sqlite3_mutex_notheld() routine are
-** intended for use only inside assert() statements. On some platforms,
-** there might be race conditions that can cause these routines to
-** deliver incorrect results. In particular, if pthread_equal() is
-** not an atomic operation, then these routines might delivery
-** incorrect results. On most platforms, pthread_equal() is a
-** comparison of two integers and is therefore atomic. But we are
-** told that HPUX is not such a platform. If so, then these routines
-** will not always work correctly on HPUX.
-**
-** On those platforms where pthread_equal() is not atomic, SQLite
-** should be compiled without -DSQLITE_DEBUG and with -DNDEBUG to
-** make sure no assert() statements are evaluated and hence these
-** routines are never called.
-*/
-#if !defined(NDEBUG) || defined(SQLITE_DEBUG)
-static int pthreadMutexHeld(sqlite3_mutex *p){
- return (p->nRef!=0 && pthread_equal(p->owner, pthread_self()));
-}
-static int pthreadMutexNotheld(sqlite3_mutex *p){
- return p->nRef==0 || pthread_equal(p->owner, pthread_self())==0;
-}
-#endif
-
-/*
-** Try to provide a memory barrier operation, needed for initialization
-** and also for the implementation of xShmBarrier in the VFS in cases
-** where SQLite is compiled without mutexes.
-*/
-SQLITE_PRIVATE void sqlite3MemoryBarrier(void){
-#if defined(SQLITE_MEMORY_BARRIER)
- SQLITE_MEMORY_BARRIER;
-#elif defined(__GNUC__) && GCC_VERSION>=4001000
- __sync_synchronize();
-#endif
-}
-
-/*
-** Initialize and deinitialize the mutex subsystem.
-*/
-static int pthreadMutexInit(void){ return SQLITE_OK; }
-static int pthreadMutexEnd(void){ return SQLITE_OK; }
-
-/*
-** The sqlite3_mutex_alloc() routine allocates a new
-** mutex and returns a pointer to it. If it returns NULL
-** that means that a mutex could not be allocated. SQLite
-** will unwind its stack and return an error. The argument
-** to sqlite3_mutex_alloc() is one of these integer constants:
-**
-**
-** SQLITE_MUTEX_FAST
-** SQLITE_MUTEX_RECURSIVE
-** SQLITE_MUTEX_STATIC_MASTER
-** SQLITE_MUTEX_STATIC_MEM
-** SQLITE_MUTEX_STATIC_OPEN
-** SQLITE_MUTEX_STATIC_PRNG
-** SQLITE_MUTEX_STATIC_LRU
-** SQLITE_MUTEX_STATIC_PMEM
-** SQLITE_MUTEX_STATIC_APP1
-** SQLITE_MUTEX_STATIC_APP2
-** SQLITE_MUTEX_STATIC_APP3
-** SQLITE_MUTEX_STATIC_VFS1
-** SQLITE_MUTEX_STATIC_VFS2
-** SQLITE_MUTEX_STATIC_VFS3
-**
-**
-** The first two constants cause sqlite3_mutex_alloc() to create
-** a new mutex. The new mutex is recursive when SQLITE_MUTEX_RECURSIVE
-** is used but not necessarily so when SQLITE_MUTEX_FAST is used.
-** The mutex implementation does not need to make a distinction
-** between SQLITE_MUTEX_RECURSIVE and SQLITE_MUTEX_FAST if it does
-** not want to. But SQLite will only request a recursive mutex in
-** cases where it really needs one. If a faster non-recursive mutex
-** implementation is available on the host platform, the mutex subsystem
-** might return such a mutex in response to SQLITE_MUTEX_FAST.
-**
-** The other allowed parameters to sqlite3_mutex_alloc() each return
-** a pointer to a static preexisting mutex. Six static mutexes are
-** used by the current version of SQLite. Future versions of SQLite
-** may add additional static mutexes. Static mutexes are for internal
-** use by SQLite only. Applications that use SQLite mutexes should
-** use only the dynamic mutexes returned by SQLITE_MUTEX_FAST or
-** SQLITE_MUTEX_RECURSIVE.
-**
-** Note that if one of the dynamic mutex parameters (SQLITE_MUTEX_FAST
-** or SQLITE_MUTEX_RECURSIVE) is used then sqlite3_mutex_alloc()
-** returns a different mutex on every call. But for the static
-** mutex types, the same mutex is returned on every call that has
-** the same type number.
-*/
-static sqlite3_mutex *pthreadMutexAlloc(int iType){
- static sqlite3_mutex staticMutexes[] = {
- SQLITE3_MUTEX_INITIALIZER,
- SQLITE3_MUTEX_INITIALIZER,
- SQLITE3_MUTEX_INITIALIZER,
- SQLITE3_MUTEX_INITIALIZER,
- SQLITE3_MUTEX_INITIALIZER,
- SQLITE3_MUTEX_INITIALIZER,
- SQLITE3_MUTEX_INITIALIZER,
- SQLITE3_MUTEX_INITIALIZER,
- SQLITE3_MUTEX_INITIALIZER,
- SQLITE3_MUTEX_INITIALIZER,
- SQLITE3_MUTEX_INITIALIZER,
- SQLITE3_MUTEX_INITIALIZER
- };
- sqlite3_mutex *p;
- switch( iType ){
- case SQLITE_MUTEX_RECURSIVE: {
- p = sqlite3MallocZero( sizeof(*p) );
- if( p ){
-#ifdef SQLITE_HOMEGROWN_RECURSIVE_MUTEX
- /* If recursive mutexes are not available, we will have to
- ** build our own. See below. */
- pthread_mutex_init(&p->mutex, 0);
-#else
- /* Use a recursive mutex if it is available */
- pthread_mutexattr_t recursiveAttr;
- pthread_mutexattr_init(&recursiveAttr);
- pthread_mutexattr_settype(&recursiveAttr, PTHREAD_MUTEX_RECURSIVE);
- pthread_mutex_init(&p->mutex, &recursiveAttr);
- pthread_mutexattr_destroy(&recursiveAttr);
-#endif
- }
- break;
- }
- case SQLITE_MUTEX_FAST: {
- p = sqlite3MallocZero( sizeof(*p) );
- if( p ){
- pthread_mutex_init(&p->mutex, 0);
- }
- break;
- }
- default: {
-#ifdef SQLITE_ENABLE_API_ARMOR
- if( iType-2<0 || iType-2>=ArraySize(staticMutexes) ){
- (void)SQLITE_MISUSE_BKPT;
- return 0;
- }
-#endif
- p = &staticMutexes[iType-2];
- break;
- }
- }
-#if SQLITE_MUTEX_NREF || defined(SQLITE_ENABLE_API_ARMOR)
- if( p ) p->id = iType;
-#endif
- return p;
-}
-
-
-/*
-** This routine deallocates a previously
-** allocated mutex. SQLite is careful to deallocate every
-** mutex that it allocates.
-*/
-static void pthreadMutexFree(sqlite3_mutex *p){
- assert( p->nRef==0 );
-#if SQLITE_ENABLE_API_ARMOR
- if( p->id==SQLITE_MUTEX_FAST || p->id==SQLITE_MUTEX_RECURSIVE )
-#endif
- {
- pthread_mutex_destroy(&p->mutex);
- sqlite3_free(p);
- }
-#ifdef SQLITE_ENABLE_API_ARMOR
- else{
- (void)SQLITE_MISUSE_BKPT;
- }
-#endif
-}
-
-/*
-** The sqlite3_mutex_enter() and sqlite3_mutex_try() routines attempt
-** to enter a mutex. If another thread is already within the mutex,
-** sqlite3_mutex_enter() will block and sqlite3_mutex_try() will return
-** SQLITE_BUSY. The sqlite3_mutex_try() interface returns SQLITE_OK
-** upon successful entry. Mutexes created using SQLITE_MUTEX_RECURSIVE can
-** be entered multiple times by the same thread. In such cases the,
-** mutex must be exited an equal number of times before another thread
-** can enter. If the same thread tries to enter any other kind of mutex
-** more than once, the behavior is undefined.
-*/
-static void pthreadMutexEnter(sqlite3_mutex *p){
- assert( p->id==SQLITE_MUTEX_RECURSIVE || pthreadMutexNotheld(p) );
-
-#ifdef SQLITE_HOMEGROWN_RECURSIVE_MUTEX
- /* If recursive mutexes are not available, then we have to grow
- ** our own. This implementation assumes that pthread_equal()
- ** is atomic - that it cannot be deceived into thinking self
- ** and p->owner are equal if p->owner changes between two values
- ** that are not equal to self while the comparison is taking place.
- ** This implementation also assumes a coherent cache - that
- ** separate processes cannot read different values from the same
- ** address at the same time. If either of these two conditions
- ** are not met, then the mutexes will fail and problems will result.
- */
- {
- pthread_t self = pthread_self();
- if( p->nRef>0 && pthread_equal(p->owner, self) ){
- p->nRef++;
- }else{
- pthread_mutex_lock(&p->mutex);
- assert( p->nRef==0 );
- p->owner = self;
- p->nRef = 1;
- }
- }
-#else
- /* Use the built-in recursive mutexes if they are available.
- */
- pthread_mutex_lock(&p->mutex);
-#if SQLITE_MUTEX_NREF
- assert( p->nRef>0 || p->owner==0 );
- p->owner = pthread_self();
- p->nRef++;
-#endif
-#endif
-
-#ifdef SQLITE_DEBUG
- if( p->trace ){
- printf("enter mutex %p (%d) with nRef=%d\n", p, p->trace, p->nRef);
- }
-#endif
-}
-static int pthreadMutexTry(sqlite3_mutex *p){
- int rc;
- assert( p->id==SQLITE_MUTEX_RECURSIVE || pthreadMutexNotheld(p) );
-
-#ifdef SQLITE_HOMEGROWN_RECURSIVE_MUTEX
- /* If recursive mutexes are not available, then we have to grow
- ** our own. This implementation assumes that pthread_equal()
- ** is atomic - that it cannot be deceived into thinking self
- ** and p->owner are equal if p->owner changes between two values
- ** that are not equal to self while the comparison is taking place.
- ** This implementation also assumes a coherent cache - that
- ** separate processes cannot read different values from the same
- ** address at the same time. If either of these two conditions
- ** are not met, then the mutexes will fail and problems will result.
- */
- {
- pthread_t self = pthread_self();
- if( p->nRef>0 && pthread_equal(p->owner, self) ){
- p->nRef++;
- rc = SQLITE_OK;
- }else if( pthread_mutex_trylock(&p->mutex)==0 ){
- assert( p->nRef==0 );
- p->owner = self;
- p->nRef = 1;
- rc = SQLITE_OK;
- }else{
- rc = SQLITE_BUSY;
- }
- }
-#else
- /* Use the built-in recursive mutexes if they are available.
- */
- if( pthread_mutex_trylock(&p->mutex)==0 ){
-#if SQLITE_MUTEX_NREF
- p->owner = pthread_self();
- p->nRef++;
-#endif
- rc = SQLITE_OK;
- }else{
- rc = SQLITE_BUSY;
- }
-#endif
-
-#ifdef SQLITE_DEBUG
- if( rc==SQLITE_OK && p->trace ){
- printf("enter mutex %p (%d) with nRef=%d\n", p, p->trace, p->nRef);
- }
-#endif
- return rc;
-}
-
-/*
-** The sqlite3_mutex_leave() routine exits a mutex that was
-** previously entered by the same thread. The behavior
-** is undefined if the mutex is not currently entered or
-** is not currently allocated. SQLite will never do either.
-*/
-static void pthreadMutexLeave(sqlite3_mutex *p){
- assert( pthreadMutexHeld(p) );
-#if SQLITE_MUTEX_NREF
- p->nRef--;
- if( p->nRef==0 ) p->owner = 0;
-#endif
- assert( p->nRef==0 || p->id==SQLITE_MUTEX_RECURSIVE );
-
-#ifdef SQLITE_HOMEGROWN_RECURSIVE_MUTEX
- if( p->nRef==0 ){
- pthread_mutex_unlock(&p->mutex);
- }
-#else
- pthread_mutex_unlock(&p->mutex);
-#endif
-
-#ifdef SQLITE_DEBUG
- if( p->trace ){
- printf("leave mutex %p (%d) with nRef=%d\n", p, p->trace, p->nRef);
- }
-#endif
-}
-
-SQLITE_PRIVATE sqlite3_mutex_methods const *sqlite3DefaultMutex(void){
- static const sqlite3_mutex_methods sMutex = {
- pthreadMutexInit,
- pthreadMutexEnd,
- pthreadMutexAlloc,
- pthreadMutexFree,
- pthreadMutexEnter,
- pthreadMutexTry,
- pthreadMutexLeave,
-#ifdef SQLITE_DEBUG
- pthreadMutexHeld,
- pthreadMutexNotheld
-#else
- 0,
- 0
-#endif
- };
-
- return &sMutex;
-}
-
-#endif /* SQLITE_MUTEX_PTHREADS */
-
-/************** End of mutex_unix.c ******************************************/
-/************** Begin file mutex_w32.c ***************************************/
-/*
-** 2007 August 14
-**
-** The author disclaims copyright to this source code. In place of
-** a legal notice, here is a blessing:
-**
-** May you do good and not evil.
-** May you find forgiveness for yourself and forgive others.
-** May you share freely, never taking more than you give.
-**
-*************************************************************************
-** This file contains the C functions that implement mutexes for Win32.
-*/
-/* #include "sqliteInt.h" */
-
-#if SQLITE_OS_WIN
-/*
-** Include code that is common to all os_*.c files
-*/
-/************** Include os_common.h in the middle of mutex_w32.c *************/
-/************** Begin file os_common.h ***************************************/
-/*
-** 2004 May 22
-**
-** The author disclaims copyright to this source code. In place of
-** a legal notice, here is a blessing:
-**
-** May you do good and not evil.
-** May you find forgiveness for yourself and forgive others.
-** May you share freely, never taking more than you give.
-**
-******************************************************************************
-**
-** This file contains macros and a little bit of code that is common to
-** all of the platform-specific files (os_*.c) and is #included into those
-** files.
-**
-** This file should be #included by the os_*.c files only. It is not a
-** general purpose header file.
-*/
-#ifndef _OS_COMMON_H_
-#define _OS_COMMON_H_
-
-/*
-** At least two bugs have slipped in because we changed the MEMORY_DEBUG
-** macro to SQLITE_DEBUG and some older makefiles have not yet made the
-** switch. The following code should catch this problem at compile-time.
-*/
-#ifdef MEMORY_DEBUG
-# error "The MEMORY_DEBUG macro is obsolete. Use SQLITE_DEBUG instead."
-#endif
-
-/*
-** Macros for performance tracing. Normally turned off. Only works
-** on i486 hardware.
-*/
-#ifdef SQLITE_PERFORMANCE_TRACE
-
-/*
-** hwtime.h contains inline assembler code for implementing
-** high-performance timing routines.
-*/
-/************** Include hwtime.h in the middle of os_common.h ****************/
-/************** Begin file hwtime.h ******************************************/
-/*
-** 2008 May 27
-**
-** The author disclaims copyright to this source code. In place of
-** a legal notice, here is a blessing:
-**
-** May you do good and not evil.
-** May you find forgiveness for yourself and forgive others.
-** May you share freely, never taking more than you give.
-**
-******************************************************************************
-**
-** This file contains inline asm code for retrieving "high-performance"
-** counters for x86 class CPUs.
-*/
-#ifndef SQLITE_HWTIME_H
-#define SQLITE_HWTIME_H
-
-/*
-** The following routine only works on pentium-class (or newer) processors.
-** It uses the RDTSC opcode to read the cycle count value out of the
-** processor and returns that value. This can be used for high-res
-** profiling.
-*/
-#if (defined(__GNUC__) || defined(_MSC_VER)) && \
- (defined(i386) || defined(__i386__) || defined(_M_IX86))
-
- #if defined(__GNUC__)
-
- __inline__ sqlite_uint64 sqlite3Hwtime(void){
- unsigned int lo, hi;
- __asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi));
- return (sqlite_uint64)hi << 32 | lo;
- }
-
- #elif defined(_MSC_VER)
-
- __declspec(naked) __inline sqlite_uint64 __cdecl sqlite3Hwtime(void){
- __asm {
- rdtsc
- ret ; return value at EDX:EAX
- }
- }
-
- #endif
-
-#elif (defined(__GNUC__) && defined(__x86_64__))
-
- __inline__ sqlite_uint64 sqlite3Hwtime(void){
- unsigned long val;
- __asm__ __volatile__ ("rdtsc" : "=A" (val));
- return val;
- }
-
-#elif (defined(__GNUC__) && defined(__ppc__))
-
- __inline__ sqlite_uint64 sqlite3Hwtime(void){
- unsigned long long retval;
- unsigned long junk;
- __asm__ __volatile__ ("\n\
- 1: mftbu %1\n\
- mftb %L0\n\
- mftbu %0\n\
- cmpw %0,%1\n\
- bne 1b"
- : "=r" (retval), "=r" (junk));
- return retval;
- }
-
-#else
-
- #error Need implementation of sqlite3Hwtime() for your platform.
-
- /*
- ** To compile without implementing sqlite3Hwtime() for your platform,
- ** you can remove the above #error and use the following
- ** stub function. You will lose timing support for many
- ** of the debugging and testing utilities, but it should at
- ** least compile and run.
- */
-SQLITE_PRIVATE sqlite_uint64 sqlite3Hwtime(void){ return ((sqlite_uint64)0); }
-
-#endif
-
-#endif /* !defined(SQLITE_HWTIME_H) */
-
-/************** End of hwtime.h **********************************************/
-/************** Continuing where we left off in os_common.h ******************/
-
-static sqlite_uint64 g_start;
-static sqlite_uint64 g_elapsed;
-#define TIMER_START g_start=sqlite3Hwtime()
-#define TIMER_END g_elapsed=sqlite3Hwtime()-g_start
-#define TIMER_ELAPSED g_elapsed
-#else
-#define TIMER_START
-#define TIMER_END
-#define TIMER_ELAPSED ((sqlite_uint64)0)
-#endif
-
-/*
-** If we compile with the SQLITE_TEST macro set, then the following block
-** of code will give us the ability to simulate a disk I/O error. This
-** is used for testing the I/O recovery logic.
-*/
-#if defined(SQLITE_TEST)
-SQLITE_API extern int sqlite3_io_error_hit;
-SQLITE_API extern int sqlite3_io_error_hardhit;
-SQLITE_API extern int sqlite3_io_error_pending;
-SQLITE_API extern int sqlite3_io_error_persist;
-SQLITE_API extern int sqlite3_io_error_benign;
-SQLITE_API extern int sqlite3_diskfull_pending;
-SQLITE_API extern int sqlite3_diskfull;
-#define SimulateIOErrorBenign(X) sqlite3_io_error_benign=(X)
-#define SimulateIOError(CODE) \
- if( (sqlite3_io_error_persist && sqlite3_io_error_hit) \
- || sqlite3_io_error_pending-- == 1 ) \
- { local_ioerr(); CODE; }
-static void local_ioerr(){
- IOTRACE(("IOERR\n"));
- sqlite3_io_error_hit++;
- if( !sqlite3_io_error_benign ) sqlite3_io_error_hardhit++;
-}
-#define SimulateDiskfullError(CODE) \
- if( sqlite3_diskfull_pending ){ \
- if( sqlite3_diskfull_pending == 1 ){ \
- local_ioerr(); \
- sqlite3_diskfull = 1; \
- sqlite3_io_error_hit = 1; \
- CODE; \
- }else{ \
- sqlite3_diskfull_pending--; \
- } \
- }
-#else
-#define SimulateIOErrorBenign(X)
-#define SimulateIOError(A)
-#define SimulateDiskfullError(A)
-#endif /* defined(SQLITE_TEST) */
-
-/*
-** When testing, keep a count of the number of open files.
-*/
-#if defined(SQLITE_TEST)
-SQLITE_API extern int sqlite3_open_file_count;
-#define OpenCounter(X) sqlite3_open_file_count+=(X)
-#else
-#define OpenCounter(X)
-#endif /* defined(SQLITE_TEST) */
-
-#endif /* !defined(_OS_COMMON_H_) */
-
-/************** End of os_common.h *******************************************/
-/************** Continuing where we left off in mutex_w32.c ******************/
-
-/*
-** Include the header file for the Windows VFS.
-*/
-/************** Include os_win.h in the middle of mutex_w32.c ****************/
-/************** Begin file os_win.h ******************************************/
-/*
-** 2013 November 25
-**
-** The author disclaims copyright to this source code. In place of
-** a legal notice, here is a blessing:
-**
-** May you do good and not evil.
-** May you find forgiveness for yourself and forgive others.
-** May you share freely, never taking more than you give.
-**
-******************************************************************************
-**
-** This file contains code that is specific to Windows.
-*/
-#ifndef SQLITE_OS_WIN_H
-#define SQLITE_OS_WIN_H
-
-/*
-** Include the primary Windows SDK header file.
-*/
-#include "windows.h"
-
-#ifdef __CYGWIN__
-# include
-# include /* amalgamator: dontcache */
-#endif
-
-/*
-** Determine if we are dealing with Windows NT.
-**
-** We ought to be able to determine if we are compiling for Windows 9x or
-** Windows NT using the _WIN32_WINNT macro as follows:
-**
-** #if defined(_WIN32_WINNT)
-** # define SQLITE_OS_WINNT 1
-** #else
-** # define SQLITE_OS_WINNT 0
-** #endif
-**
-** However, Visual Studio 2005 does not set _WIN32_WINNT by default, as
-** it ought to, so the above test does not work. We'll just assume that
-** everything is Windows NT unless the programmer explicitly says otherwise
-** by setting SQLITE_OS_WINNT to 0.
-*/
-#if SQLITE_OS_WIN && !defined(SQLITE_OS_WINNT)
-# define SQLITE_OS_WINNT 1
-#endif
-
-/*
-** Determine if we are dealing with Windows CE - which has a much reduced
-** API.
-*/
-#if defined(_WIN32_WCE)
-# define SQLITE_OS_WINCE 1
-#else
-# define SQLITE_OS_WINCE 0
-#endif
-
-/*
-** Determine if we are dealing with WinRT, which provides only a subset of
-** the full Win32 API.
-*/
-#if !defined(SQLITE_OS_WINRT)
-# define SQLITE_OS_WINRT 0
-#endif
-
-/*
-** For WinCE, some API function parameters do not appear to be declared as
-** volatile.
-*/
-#if SQLITE_OS_WINCE
-# define SQLITE_WIN32_VOLATILE
-#else
-# define SQLITE_WIN32_VOLATILE volatile
-#endif
-
-/*
-** For some Windows sub-platforms, the _beginthreadex() / _endthreadex()
-** functions are not available (e.g. those not using MSVC, Cygwin, etc).
-*/
-#if SQLITE_OS_WIN && !SQLITE_OS_WINCE && !SQLITE_OS_WINRT && \
- SQLITE_THREADSAFE>0 && !defined(__CYGWIN__)
-# define SQLITE_OS_WIN_THREADS 1
-#else
-# define SQLITE_OS_WIN_THREADS 0
-#endif
-
-#endif /* SQLITE_OS_WIN_H */
-
-/************** End of os_win.h **********************************************/
-/************** Continuing where we left off in mutex_w32.c ******************/
-#endif
-
-/*
-** The code in this file is only used if we are compiling multithreaded
-** on a Win32 system.
-*/
-#ifdef SQLITE_MUTEX_W32
-
-/*
-** Each recursive mutex is an instance of the following structure.
-*/
-struct sqlite3_mutex {
- CRITICAL_SECTION mutex; /* Mutex controlling the lock */
- int id; /* Mutex type */
-#ifdef SQLITE_DEBUG
- volatile int nRef; /* Number of enterances */
- volatile DWORD owner; /* Thread holding this mutex */
- volatile int trace; /* True to trace changes */
-#endif
-};
-
-/*
-** These are the initializer values used when declaring a "static" mutex
-** on Win32. It should be noted that all mutexes require initialization
-** on the Win32 platform.
-*/
-#define SQLITE_W32_MUTEX_INITIALIZER { 0 }
-
-#ifdef SQLITE_DEBUG
-#define SQLITE3_MUTEX_INITIALIZER { SQLITE_W32_MUTEX_INITIALIZER, 0, \
- 0L, (DWORD)0, 0 }
-#else
-#define SQLITE3_MUTEX_INITIALIZER { SQLITE_W32_MUTEX_INITIALIZER, 0 }
-#endif
-
-#ifdef SQLITE_DEBUG
-/*
-** The sqlite3_mutex_held() and sqlite3_mutex_notheld() routine are
-** intended for use only inside assert() statements.
-*/
-static int winMutexHeld(sqlite3_mutex *p){
- return p->nRef!=0 && p->owner==GetCurrentThreadId();
-}
-
-static int winMutexNotheld2(sqlite3_mutex *p, DWORD tid){
- return p->nRef==0 || p->owner!=tid;
-}
-
-static int winMutexNotheld(sqlite3_mutex *p){
- DWORD tid = GetCurrentThreadId();
- return winMutexNotheld2(p, tid);
-}
-#endif
-
-/*
-** Try to provide a memory barrier operation, needed for initialization
-** and also for the xShmBarrier method of the VFS in cases when SQLite is
-** compiled without mutexes (SQLITE_THREADSAFE=0).
-*/
-SQLITE_PRIVATE void sqlite3MemoryBarrier(void){
-#if defined(SQLITE_MEMORY_BARRIER)
- SQLITE_MEMORY_BARRIER;
-#elif defined(__GNUC__)
- __sync_synchronize();
-#elif !defined(SQLITE_DISABLE_INTRINSIC) && \
- defined(_MSC_VER) && _MSC_VER>=1300
- _ReadWriteBarrier();
-#elif defined(MemoryBarrier)
- MemoryBarrier();
-#endif
-}
-
-/*
-** Initialize and deinitialize the mutex subsystem.
-*/
-static sqlite3_mutex winMutex_staticMutexes[] = {
- SQLITE3_MUTEX_INITIALIZER,
- SQLITE3_MUTEX_INITIALIZER,
- SQLITE3_MUTEX_INITIALIZER,
- SQLITE3_MUTEX_INITIALIZER,
- SQLITE3_MUTEX_INITIALIZER,
- SQLITE3_MUTEX_INITIALIZER,
- SQLITE3_MUTEX_INITIALIZER,
- SQLITE3_MUTEX_INITIALIZER,
- SQLITE3_MUTEX_INITIALIZER,
- SQLITE3_MUTEX_INITIALIZER,
- SQLITE3_MUTEX_INITIALIZER,
- SQLITE3_MUTEX_INITIALIZER
-};
-
-static int winMutex_isInit = 0;
-static int winMutex_isNt = -1; /* <0 means "need to query" */
-
-/* As the winMutexInit() and winMutexEnd() functions are called as part
-** of the sqlite3_initialize() and sqlite3_shutdown() processing, the
-** "interlocked" magic used here is probably not strictly necessary.
-*/
-static LONG SQLITE_WIN32_VOLATILE winMutex_lock = 0;
-
-SQLITE_API int SQLITE_STDCALL sqlite3_win32_is_nt(void); /* os_win.c */
-SQLITE_API void SQLITE_STDCALL sqlite3_win32_sleep(DWORD milliseconds); /* os_win.c */
-
-static int winMutexInit(void){
- /* The first to increment to 1 does actual initialization */
- if( InterlockedCompareExchange(&winMutex_lock, 1, 0)==0 ){
- int i;
- for(i=0; i
-** SQLITE_MUTEX_FAST
-** SQLITE_MUTEX_RECURSIVE
-** SQLITE_MUTEX_STATIC_MASTER
-** SQLITE_MUTEX_STATIC_MEM
-** SQLITE_MUTEX_STATIC_OPEN
-** SQLITE_MUTEX_STATIC_PRNG
-** SQLITE_MUTEX_STATIC_LRU
-** SQLITE_MUTEX_STATIC_PMEM
-** SQLITE_MUTEX_STATIC_APP1
-** SQLITE_MUTEX_STATIC_APP2
-** SQLITE_MUTEX_STATIC_APP3
-** SQLITE_MUTEX_STATIC_VFS1
-** SQLITE_MUTEX_STATIC_VFS2
-** SQLITE_MUTEX_STATIC_VFS3
-**
-**
-** The first two constants cause sqlite3_mutex_alloc() to create
-** a new mutex. The new mutex is recursive when SQLITE_MUTEX_RECURSIVE
-** is used but not necessarily so when SQLITE_MUTEX_FAST is used.
-** The mutex implementation does not need to make a distinction
-** between SQLITE_MUTEX_RECURSIVE and SQLITE_MUTEX_FAST if it does
-** not want to. But SQLite will only request a recursive mutex in
-** cases where it really needs one. If a faster non-recursive mutex
-** implementation is available on the host platform, the mutex subsystem
-** might return such a mutex in response to SQLITE_MUTEX_FAST.
-**
-** The other allowed parameters to sqlite3_mutex_alloc() each return
-** a pointer to a static preexisting mutex. Six static mutexes are
-** used by the current version of SQLite. Future versions of SQLite
-** may add additional static mutexes. Static mutexes are for internal
-** use by SQLite only. Applications that use SQLite mutexes should
-** use only the dynamic mutexes returned by SQLITE_MUTEX_FAST or
-** SQLITE_MUTEX_RECURSIVE.
-**
-** Note that if one of the dynamic mutex parameters (SQLITE_MUTEX_FAST
-** or SQLITE_MUTEX_RECURSIVE) is used then sqlite3_mutex_alloc()
-** returns a different mutex on every call. But for the static
-** mutex types, the same mutex is returned on every call that has
-** the same type number.
-*/
-static sqlite3_mutex *winMutexAlloc(int iType){
- sqlite3_mutex *p;
-
- switch( iType ){
- case SQLITE_MUTEX_FAST:
- case SQLITE_MUTEX_RECURSIVE: {
- p = sqlite3MallocZero( sizeof(*p) );
- if( p ){
- p->id = iType;
-#ifdef SQLITE_DEBUG
-#ifdef SQLITE_WIN32_MUTEX_TRACE_DYNAMIC
- p->trace = 1;
-#endif
-#endif
-#if SQLITE_OS_WINRT
- InitializeCriticalSectionEx(&p->mutex, 0, 0);
-#else
- InitializeCriticalSection(&p->mutex);
-#endif
- }
- break;
- }
- default: {
-#ifdef SQLITE_ENABLE_API_ARMOR
- if( iType-2<0 || iType-2>=ArraySize(winMutex_staticMutexes) ){
- (void)SQLITE_MISUSE_BKPT;
- return 0;
- }
-#endif
- p = &winMutex_staticMutexes[iType-2];
- p->id = iType;
-#ifdef SQLITE_DEBUG
-#ifdef SQLITE_WIN32_MUTEX_TRACE_STATIC
- p->trace = 1;
-#endif
-#endif
- break;
- }
- }
- return p;
-}
-
-
-/*
-** This routine deallocates a previously
-** allocated mutex. SQLite is careful to deallocate every
-** mutex that it allocates.
-*/
-static void winMutexFree(sqlite3_mutex *p){
- assert( p );
- assert( p->nRef==0 && p->owner==0 );
- if( p->id==SQLITE_MUTEX_FAST || p->id==SQLITE_MUTEX_RECURSIVE ){
- DeleteCriticalSection(&p->mutex);
- sqlite3_free(p);
- }else{
-#ifdef SQLITE_ENABLE_API_ARMOR
- (void)SQLITE_MISUSE_BKPT;
-#endif
- }
-}
-
-/*
-** The sqlite3_mutex_enter() and sqlite3_mutex_try() routines attempt
-** to enter a mutex. If another thread is already within the mutex,
-** sqlite3_mutex_enter() will block and sqlite3_mutex_try() will return
-** SQLITE_BUSY. The sqlite3_mutex_try() interface returns SQLITE_OK
-** upon successful entry. Mutexes created using SQLITE_MUTEX_RECURSIVE can
-** be entered multiple times by the same thread. In such cases the,
-** mutex must be exited an equal number of times before another thread
-** can enter. If the same thread tries to enter any other kind of mutex
-** more than once, the behavior is undefined.
-*/
-static void winMutexEnter(sqlite3_mutex *p){
-#if defined(SQLITE_DEBUG) || defined(SQLITE_TEST)
- DWORD tid = GetCurrentThreadId();
-#endif
-#ifdef SQLITE_DEBUG
- assert( p );
- assert( p->id==SQLITE_MUTEX_RECURSIVE || winMutexNotheld2(p, tid) );
-#else
- assert( p );
-#endif
- assert( winMutex_isInit==1 );
- EnterCriticalSection(&p->mutex);
-#ifdef SQLITE_DEBUG
- assert( p->nRef>0 || p->owner==0 );
- p->owner = tid;
- p->nRef++;
- if( p->trace ){
- OSTRACE(("ENTER-MUTEX tid=%lu, mutex=%p (%d), nRef=%d\n",
- tid, p, p->trace, p->nRef));
- }
-#endif
-}
-
-static int winMutexTry(sqlite3_mutex *p){
-#if defined(SQLITE_DEBUG) || defined(SQLITE_TEST)
- DWORD tid = GetCurrentThreadId();
-#endif
- int rc = SQLITE_BUSY;
- assert( p );
- assert( p->id==SQLITE_MUTEX_RECURSIVE || winMutexNotheld2(p, tid) );
- /*
- ** The sqlite3_mutex_try() routine is very rarely used, and when it
- ** is used it is merely an optimization. So it is OK for it to always
- ** fail.
- **
- ** The TryEnterCriticalSection() interface is only available on WinNT.
- ** And some windows compilers complain if you try to use it without
- ** first doing some #defines that prevent SQLite from building on Win98.
- ** For that reason, we will omit this optimization for now. See
- ** ticket #2685.
- */
-#if defined(_WIN32_WINNT) && _WIN32_WINNT >= 0x0400
- assert( winMutex_isInit==1 );
- assert( winMutex_isNt>=-1 && winMutex_isNt<=1 );
- if( winMutex_isNt<0 ){
- winMutex_isNt = sqlite3_win32_is_nt();
- }
- assert( winMutex_isNt==0 || winMutex_isNt==1 );
- if( winMutex_isNt && TryEnterCriticalSection(&p->mutex) ){
-#ifdef SQLITE_DEBUG
- p->owner = tid;
- p->nRef++;
-#endif
- rc = SQLITE_OK;
- }
-#else
- UNUSED_PARAMETER(p);
-#endif
-#ifdef SQLITE_DEBUG
- if( p->trace ){
- OSTRACE(("TRY-MUTEX tid=%lu, mutex=%p (%d), owner=%lu, nRef=%d, rc=%s\n",
- tid, p, p->trace, p->owner, p->nRef, sqlite3ErrName(rc)));
- }
-#endif
- return rc;
-}
-
-/*
-** The sqlite3_mutex_leave() routine exits a mutex that was
-** previously entered by the same thread. The behavior
-** is undefined if the mutex is not currently entered or
-** is not currently allocated. SQLite will never do either.
-*/
-static void winMutexLeave(sqlite3_mutex *p){
-#if defined(SQLITE_DEBUG) || defined(SQLITE_TEST)
- DWORD tid = GetCurrentThreadId();
-#endif
- assert( p );
-#ifdef SQLITE_DEBUG
- assert( p->nRef>0 );
- assert( p->owner==tid );
- p->nRef--;
- if( p->nRef==0 ) p->owner = 0;
- assert( p->nRef==0 || p->id==SQLITE_MUTEX_RECURSIVE );
-#endif
- assert( winMutex_isInit==1 );
- LeaveCriticalSection(&p->mutex);
-#ifdef SQLITE_DEBUG
- if( p->trace ){
- OSTRACE(("LEAVE-MUTEX tid=%lu, mutex=%p (%d), nRef=%d\n",
- tid, p, p->trace, p->nRef));
- }
-#endif
-}
-
-SQLITE_PRIVATE sqlite3_mutex_methods const *sqlite3DefaultMutex(void){
- static const sqlite3_mutex_methods sMutex = {
- winMutexInit,
- winMutexEnd,
- winMutexAlloc,
- winMutexFree,
- winMutexEnter,
- winMutexTry,
- winMutexLeave,
-#ifdef SQLITE_DEBUG
- winMutexHeld,
- winMutexNotheld
-#else
- 0,
- 0
-#endif
- };
- return &sMutex;
-}
-
-#endif /* SQLITE_MUTEX_W32 */
-
-/************** End of mutex_w32.c *******************************************/
-/************** Begin file malloc.c ******************************************/
-/*
-** 2001 September 15
-**
-** The author disclaims copyright to this source code. In place of
-** a legal notice, here is a blessing:
-**
-** May you do good and not evil.
-** May you find forgiveness for yourself and forgive others.
-** May you share freely, never taking more than you give.
-**
-*************************************************************************
-**
-** Memory allocation functions used throughout sqlite.
-*/
-/* #include "sqliteInt.h" */
-/* #include */
-
-/*
-** Attempt to release up to n bytes of non-essential memory currently
-** held by SQLite. An example of non-essential memory is memory used to
-** cache database pages that are not currently in use.
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_release_memory(int n){
-#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT
- return sqlite3PcacheReleaseMemory(n);
-#else
- /* IMPLEMENTATION-OF: R-34391-24921 The sqlite3_release_memory() routine
- ** is a no-op returning zero if SQLite is not compiled with
- ** SQLITE_ENABLE_MEMORY_MANAGEMENT. */
- UNUSED_PARAMETER(n);
- return 0;
-#endif
-}
-
-/*
-** An instance of the following object records the location of
-** each unused scratch buffer.
-*/
-typedef struct ScratchFreeslot {
- struct ScratchFreeslot *pNext; /* Next unused scratch buffer */
-} ScratchFreeslot;
-
-/*
-** State information local to the memory allocation subsystem.
-*/
-static SQLITE_WSD struct Mem0Global {
- sqlite3_mutex *mutex; /* Mutex to serialize access */
- sqlite3_int64 alarmThreshold; /* The soft heap limit */
-
- /*
- ** Pointers to the end of sqlite3GlobalConfig.pScratch memory
- ** (so that a range test can be used to determine if an allocation
- ** being freed came from pScratch) and a pointer to the list of
- ** unused scratch allocations.
- */
- void *pScratchEnd;
- ScratchFreeslot *pScratchFree;
- u32 nScratchFree;
-
- /*
- ** True if heap is nearly "full" where "full" is defined by the
- ** sqlite3_soft_heap_limit() setting.
- */
- int nearlyFull;
-} mem0 = { 0, 0, 0, 0, 0, 0 };
-
-#define mem0 GLOBAL(struct Mem0Global, mem0)
-
-/*
-** Return the memory allocator mutex. sqlite3_status() needs it.
-*/
-SQLITE_PRIVATE sqlite3_mutex *sqlite3MallocMutex(void){
- return mem0.mutex;
-}
-
-#ifndef SQLITE_OMIT_DEPRECATED
-/*
-** Deprecated external interface. It used to set an alarm callback
-** that was invoked when memory usage grew too large. Now it is a
-** no-op.
-*/
-SQLITE_API int SQLITE_STDCALL sqlite3_memory_alarm(
- void(*xCallback)(void *pArg, sqlite3_int64 used,int N),
- void *pArg,
- sqlite3_int64 iThreshold
-){
- (void)xCallback;
- (void)pArg;
- (void)iThreshold;
- return SQLITE_OK;
-}
-#endif
-
-/*
-** Set the soft heap-size limit for the library. Passing a zero or
-** negative value indicates no limit.
-*/
-SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_soft_heap_limit64(sqlite3_int64 n){
- sqlite3_int64 priorLimit;
- sqlite3_int64 excess;
- sqlite3_int64 nUsed;
-#ifndef SQLITE_OMIT_AUTOINIT
- int rc = sqlite3_initialize();
- if( rc ) return -1;
-#endif
- sqlite3_mutex_enter(mem0.mutex);
- priorLimit = mem0.alarmThreshold;
- if( n<0 ){
- sqlite3_mutex_leave(mem0.mutex);
- return priorLimit;
- }
- mem0.alarmThreshold = n;
- nUsed = sqlite3StatusValue(SQLITE_STATUS_MEMORY_USED);
- mem0.nearlyFull = (n>0 && n<=nUsed);
- sqlite3_mutex_leave(mem0.mutex);
- excess = sqlite3_memory_used() - n;
- if( excess>0 ) sqlite3_release_memory((int)(excess & 0x7fffffff));
- return priorLimit;
-}
-SQLITE_API void SQLITE_STDCALL sqlite3_soft_heap_limit(int n){
- if( n<0 ) n = 0;
- sqlite3_soft_heap_limit64(n);
-}
-
-/*
-** Initialize the memory allocation subsystem.
-*/
-SQLITE_PRIVATE int sqlite3MallocInit(void){
- int rc;
- if( sqlite3GlobalConfig.m.xMalloc==0 ){
- sqlite3MemSetDefault();
- }
- memset(&mem0, 0, sizeof(mem0));
- mem0.mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MEM);
- if( sqlite3GlobalConfig.pScratch && sqlite3GlobalConfig.szScratch>=100
- && sqlite3GlobalConfig.nScratch>0 ){
- int i, n, sz;
- ScratchFreeslot *pSlot;
- sz = ROUNDDOWN8(sqlite3GlobalConfig.szScratch);
- sqlite3GlobalConfig.szScratch = sz;
- pSlot = (ScratchFreeslot*)sqlite3GlobalConfig.pScratch;
- n = sqlite3GlobalConfig.nScratch;
- mem0.pScratchFree = pSlot;
- mem0.nScratchFree = n;
- for(i=0; ipNext = (ScratchFreeslot*)(sz+(char*)pSlot);
- pSlot = pSlot->pNext;
- }
- pSlot->pNext = 0;
- mem0.pScratchEnd = (void*)&pSlot[1];
- }else{
- mem0.pScratchEnd = 0;
- sqlite3GlobalConfig.pScratch = 0;
- sqlite3GlobalConfig.szScratch = 0;
- sqlite3GlobalConfig.nScratch = 0;
- }
- if( sqlite3GlobalConfig.pPage==0 || sqlite3GlobalConfig.szPage<512
- || sqlite3GlobalConfig.nPage<=0 ){
- sqlite3GlobalConfig.pPage = 0;
- sqlite3GlobalConfig.szPage = 0;
- }
- rc = sqlite3GlobalConfig.m.xInit(sqlite3GlobalConfig.m.pAppData);
- if( rc!=SQLITE_OK ) memset(&mem0, 0, sizeof(mem0));
- return rc;
-}
-
-/*
-** Return true if the heap is currently under memory pressure - in other
-** words if the amount of heap used is close to the limit set by
-** sqlite3_soft_heap_limit().
-*/
-SQLITE_PRIVATE int sqlite3HeapNearlyFull(void){
- return mem0.nearlyFull;
-}
-
-/*
-** Deinitialize the memory allocation subsystem.
-*/
-SQLITE_PRIVATE void sqlite3MallocEnd(void){
- if( sqlite3GlobalConfig.m.xShutdown ){
- sqlite3GlobalConfig.m.xShutdown(sqlite3GlobalConfig.m.pAppData);
- }
- memset(&mem0, 0, sizeof(mem0));
-}
-
-/*
-** Return the amount of memory currently checked out.
-*/
-SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_memory_used(void){
- sqlite3_int64 res, mx;
- sqlite3_status64(SQLITE_STATUS_MEMORY_USED, &res, &mx, 0);
- return res;
-}
-
-/*
-** Return the maximum amount of memory that has ever been
-** checked out since either the beginning of this process
-** or since the most recent reset.
-*/
-SQLITE_API sqlite3_int64 SQLITE_STDCALL sqlite3_memory_highwater(int resetFlag){
- sqlite3_int64 res, mx;
- sqlite3_status64(SQLITE_STATUS_MEMORY_USED, &res, &mx, resetFlag);
- return mx;
-}
-
-/*
-** Trigger the alarm
-*/
-static void sqlite3MallocAlarm(int nByte){
- if( mem0.alarmThreshold<=0 ) return;
- sqlite3_mutex_leave(mem0.mutex);
- sqlite3_release_memory(nByte);
- sqlite3_mutex_enter(mem0.mutex);
-}
-
-/*
-** Do a memory allocation with statistics and alarms. Assume the
-** lock is already held.
-*/
-static int mallocWithAlarm(int n, void **pp){
- int nFull;
- void *p;
- assert( sqlite3_mutex_held(mem0.mutex) );
- nFull = sqlite3GlobalConfig.m.xRoundup(n);
- sqlite3StatusHighwater(SQLITE_STATUS_MALLOC_SIZE, n);
- if( mem0.alarmThreshold>0 ){
- sqlite3_int64 nUsed = sqlite3StatusValue(SQLITE_STATUS_MEMORY_USED);
- if( nUsed >= mem0.alarmThreshold - nFull ){
- mem0.nearlyFull = 1;
- sqlite3MallocAlarm(nFull);
- }else{
- mem0.nearlyFull = 0;
- }
- }
- p = sqlite3GlobalConfig.m.xMalloc(nFull);
-#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT
- if( p==0 && mem0.alarmThreshold>0 ){
- sqlite3MallocAlarm(nFull);
- p = sqlite3GlobalConfig.m.xMalloc(nFull);
- }
-#endif
- if( p ){
- nFull = sqlite3MallocSize(p);
- sqlite3StatusUp(SQLITE_STATUS_MEMORY_USED, nFull);
- sqlite3StatusUp(SQLITE_STATUS_MALLOC_COUNT, 1);
- }
- *pp = p;
- return nFull;
-}
-
-/*
-** Allocate memory. This routine is like sqlite3_malloc() except that it
-** assumes the memory subsystem has already been initialized.
-*/
-SQLITE_PRIVATE void *sqlite3Malloc(u64 n){
- void *p;
- if( n==0 || n>=0x7fffff00 ){
- /* A memory allocation of a number of bytes which is near the maximum
- ** signed integer value might cause an integer overflow inside of the
- ** xMalloc(). Hence we limit the maximum size to 0x7fffff00, giving
- ** 255 bytes of overhead. SQLite itself will never use anything near
- ** this amount. The only way to reach the limit is with sqlite3_malloc() */
- p = 0;
- }else if( sqlite3GlobalConfig.bMemstat ){
- sqlite3_mutex_enter(mem0.mutex);
- mallocWithAlarm((int)n, &p);
- sqlite3_mutex_leave(mem0.mutex);
- }else{
- p = sqlite3GlobalConfig.m.xMalloc((int)n);
- }
- assert( EIGHT_BYTE_ALIGNMENT(p) ); /* IMP: R-11148-40995 */
- return p;
-}
-
-/*
-** This version of the memory allocation is for use by the application.
-** First make sure the memory subsystem is initialized, then do the
-** allocation.
-*/
-SQLITE_API void *SQLITE_STDCALL sqlite3_malloc(int n){
-#ifndef SQLITE_OMIT_AUTOINIT
- if( sqlite3_initialize() ) return 0;
-#endif
- return n<=0 ? 0 : sqlite3Malloc(n);
-}
-SQLITE_API void *SQLITE_STDCALL sqlite3_malloc64(sqlite3_uint64 n){
-#ifndef SQLITE_OMIT_AUTOINIT
- if( sqlite3_initialize() ) return 0;
-#endif
- return sqlite3Malloc(n);
-}
-
-/*
-** Each thread may only have a single outstanding allocation from
-** xScratchMalloc(). We verify this constraint in the single-threaded
-** case by setting scratchAllocOut to 1 when an allocation
-** is outstanding clearing it when the allocation is freed.
-*/
-#if SQLITE_THREADSAFE==0 && !defined(NDEBUG)
-static int scratchAllocOut = 0;
-#endif
-
-
-/*
-** Allocate memory that is to be used and released right away.
-** This routine is similar to alloca() in that it is not intended
-** for situations where the memory might be held long-term. This
-** routine is intended to get memory to old large transient data
-** structures that would not normally fit on the stack of an
-** embedded processor.
-*/
-SQLITE_PRIVATE void *sqlite3ScratchMalloc(int n){
- void *p;
- assert( n>0 );
-
- sqlite3_mutex_enter(mem0.mutex);
- sqlite3StatusHighwater(SQLITE_STATUS_SCRATCH_SIZE, n);
- if( mem0.nScratchFree && sqlite3GlobalConfig.szScratch>=n ){
- p = mem0.pScratchFree;
- mem0.pScratchFree = mem0.pScratchFree->pNext;
- mem0.nScratchFree--;
- sqlite3StatusUp(SQLITE_STATUS_SCRATCH_USED, 1);
- sqlite3_mutex_leave(mem0.mutex);
- }else{
- sqlite3_mutex_leave(mem0.mutex);
- p = sqlite3Malloc(n);
- if( sqlite3GlobalConfig.bMemstat && p ){
- sqlite3_mutex_enter(mem0.mutex);
- sqlite3StatusUp(SQLITE_STATUS_SCRATCH_OVERFLOW, sqlite3MallocSize(p));
- sqlite3_mutex_leave(mem0.mutex);
- }
- sqlite3MemdebugSetType(p, MEMTYPE_SCRATCH);
- }
- assert( sqlite3_mutex_notheld(mem0.mutex) );
-
-
-#if SQLITE_THREADSAFE==0 && !defined(NDEBUG)
- /* EVIDENCE-OF: R-12970-05880 SQLite will not use more than one scratch
- ** buffers per thread.
- **
- ** This can only be checked in single-threaded mode.
- */
- assert( scratchAllocOut==0 );
- if( p ) scratchAllocOut++;
-#endif
-
- return p;
-}
-SQLITE_PRIVATE void sqlite3ScratchFree(void *p){
- if( p ){
-
-#if SQLITE_THREADSAFE==0 && !defined(NDEBUG)
- /* Verify that no more than two scratch allocation per thread
- ** is outstanding at one time. (This is only checked in the
- ** single-threaded case since checking in the multi-threaded case
- ** would be much more complicated.) */
- assert( scratchAllocOut>=1 && scratchAllocOut<=2 );
- scratchAllocOut--;
-#endif
-
- if( SQLITE_WITHIN(p, sqlite3GlobalConfig.pScratch, mem0.pScratchEnd) ){
- /* Release memory from the SQLITE_CONFIG_SCRATCH allocation */
- ScratchFreeslot *pSlot;
- pSlot = (ScratchFreeslot*)p;
- sqlite3_mutex_enter(mem0.mutex);
- pSlot->pNext = mem0.pScratchFree;
- mem0.pScratchFree = pSlot;
- mem0.nScratchFree++;
- assert( mem0.nScratchFree <= (u32)sqlite3GlobalConfig.nScratch );
- sqlite3StatusDown(SQLITE_STATUS_SCRATCH_USED, 1);
- sqlite3_mutex_leave(mem0.mutex);
- }else{
- /* Release memory back to the heap */
- assert( sqlite3MemdebugHasType(p, MEMTYPE_SCRATCH) );
- assert( sqlite3MemdebugNoType(p, (u8)~MEMTYPE_SCRATCH) );
- sqlite3MemdebugSetType(p, MEMTYPE_HEAP);
- if( sqlite3GlobalConfig.bMemstat ){
- int iSize = sqlite3MallocSize(p);
- sqlite3_mutex_enter(mem0.mutex);
- sqlite3StatusDown(SQLITE_STATUS_SCRATCH_OVERFLOW, iSize);
- sqlite3StatusDown(SQLITE_STATUS_MEMORY_USED, iSize);
- sqlite3StatusDown(SQLITE_STATUS_MALLOC_COUNT, 1);
- sqlite3GlobalConfig.m.xFree(p);
- sqlite3_mutex_leave(mem0.mutex);
- }else{
- sqlite3GlobalConfig.m.xFree(p);
- }
- }
- }
-}
-
-/*
-** TRUE if p is a lookaside memory allocation from db
-*/
-#ifndef SQLITE_OMIT_LOOKASIDE
-static int isLookaside(sqlite3 *db, void *p){
- return SQLITE_WITHIN(p, db->lookaside.pStart, db->lookaside.pEnd);
-}
-#else
-#define isLookaside(A,B) 0
-#endif
-
-/*
-** Return the size of a memory allocation previously obtained from
-** sqlite3Malloc() or sqlite3_malloc().
-*/
-SQLITE_PRIVATE int sqlite3MallocSize(void *p){
- assert( sqlite3MemdebugHasType(p, MEMTYPE_HEAP) );
- return sqlite3GlobalConfig.m.xSize(p);
-}
-SQLITE_PRIVATE int sqlite3DbMallocSize(sqlite3 *db, void *p){
- assert( p!=0 );
- if( db==0 || !isLookaside(db,p) ){
-#if SQLITE_DEBUG
- if( db==0 ){
- assert( sqlite3MemdebugNoType(p, (u8)~MEMTYPE_HEAP) );
- assert( sqlite3MemdebugHasType(p, MEMTYPE_HEAP) );
- }else{
- assert( sqlite3MemdebugHasType(p, (MEMTYPE_LOOKASIDE|MEMTYPE_HEAP)) );
- assert( sqlite3MemdebugNoType(p, (u8)~(MEMTYPE_LOOKASIDE|MEMTYPE_HEAP)) );
- }
-#endif
- return sqlite3GlobalConfig.m.xSize(p);
- }else{
- assert( sqlite3_mutex_held(db->mutex) );
- return db->lookaside.sz;
- }
-}
-SQLITE_API sqlite3_uint64 SQLITE_STDCALL sqlite3_msize(void *p){
- assert( sqlite3MemdebugNoType(p, (u8)~MEMTYPE_HEAP) );
- assert( sqlite3MemdebugHasType(p, MEMTYPE_HEAP) );
- return p ? sqlite3GlobalConfig.m.xSize(p) : 0;
-}
-
-/*
-** Free memory previously obtained from sqlite3Malloc().
-*/
-SQLITE_API void SQLITE_STDCALL sqlite3_free(void *p){
- if( p==0 ) return; /* IMP: R-49053-54554 */
- assert( sqlite3MemdebugHasType(p, MEMTYPE_HEAP) );
- assert( sqlite3MemdebugNoType(p, (u8)~MEMTYPE_HEAP) );
- if( sqlite3GlobalConfig.bMemstat ){
- sqlite3_mutex_enter(mem0.mutex);
- sqlite3StatusDown(SQLITE_STATUS_MEMORY_USED, sqlite3MallocSize(p));
- sqlite3StatusDown(SQLITE_STATUS_MALLOC_COUNT, 1);
- sqlite3GlobalConfig.m.xFree(p);
- sqlite3_mutex_leave(mem0.mutex);
- }else{
- sqlite3GlobalConfig.m.xFree(p);
- }
-}
-
-/*
-** Add the size of memory allocation "p" to the count in
-** *db->pnBytesFreed.
-*/
-static SQLITE_NOINLINE void measureAllocationSize(sqlite3 *db, void *p){
- *db->pnBytesFreed += sqlite3DbMallocSize(db,p);
-}
-
-/*
-** Free memory that might be associated with a particular database
-** connection.
-*/
-SQLITE_PRIVATE void sqlite3DbFree(sqlite3 *db, void *p){
- assert( db==0 || sqlite3_mutex_held(db->mutex) );
- if( p==0 ) return;
- if( db ){
- if( db->pnBytesFreed ){
- measureAllocationSize(db, p);
- return;
- }
- if( isLookaside(db, p) ){
- LookasideSlot *pBuf = (LookasideSlot*)p;
-#if SQLITE_DEBUG
- /* Trash all content in the buffer being freed */
- memset(p, 0xaa, db->lookaside.sz);
-#endif
- pBuf->pNext = db->lookaside.pFree;
- db->lookaside.pFree = pBuf;
- db->lookaside.nOut--;
- return;
- }
- }
- assert( sqlite3MemdebugHasType(p, (MEMTYPE_LOOKASIDE|MEMTYPE_HEAP)) );
- assert( sqlite3MemdebugNoType(p, (u8)~(MEMTYPE_LOOKASIDE|MEMTYPE_HEAP)) );
- assert( db!=0 || sqlite3MemdebugNoType(p, MEMTYPE_LOOKASIDE) );
- sqlite3MemdebugSetType(p, MEMTYPE_HEAP);
- sqlite3_free(p);
-}
-
-/*
-** Change the size of an existing memory allocation
-*/
-SQLITE_PRIVATE void *sqlite3Realloc(void *pOld, u64 nBytes){
- int nOld, nNew, nDiff;
- void *pNew;
- assert( sqlite3MemdebugHasType(pOld, MEMTYPE_HEAP) );
- assert( sqlite3MemdebugNoType(pOld, (u8)~MEMTYPE_HEAP) );
- if( pOld==0 ){
- return sqlite3Malloc(nBytes); /* IMP: R-04300-56712 */
- }
- if( nBytes==0 ){
- sqlite3_free(pOld); /* IMP: R-26507-47431 */
- return 0;
- }
- if( nBytes>=0x7fffff00 ){
- /* The 0x7ffff00 limit term is explained in comments on sqlite3Malloc() */
- return 0;
- }
- nOld = sqlite3MallocSize(pOld);
- /* IMPLEMENTATION-OF: R-46199-30249 SQLite guarantees that the second
- ** argument to xRealloc is always a value returned by a prior call to
- ** xRoundup. */
- nNew = sqlite3GlobalConfig.m.xRoundup((int)nBytes);
- if( nOld==nNew ){
- pNew = pOld;
- }else if( sqlite3GlobalConfig.bMemstat ){
- sqlite3_mutex_enter(mem0.mutex);
- sqlite3StatusHighwater(SQLITE_STATUS_MALLOC_SIZE, (int)nBytes);
- nDiff = nNew - nOld;
- if( sqlite3StatusValue(SQLITE_STATUS_MEMORY_USED) >=
- mem0.alarmThreshold-nDiff ){
- sqlite3MallocAlarm(nDiff);
- }
- pNew = sqlite3GlobalConfig.m.xRealloc(pOld, nNew);
- if( pNew==0 && mem0.alarmThreshold>0 ){
- sqlite3MallocAlarm((int)nBytes);
- pNew = sqlite3GlobalConfig.m.xRealloc(pOld, nNew);
- }
- if( pNew ){
- nNew = sqlite3MallocSize(pNew);
- sqlite3StatusUp(SQLITE_STATUS_MEMORY_USED, nNew-nOld);
- }
- sqlite3_mutex_leave(mem0.mutex);
- }else{
- pNew = sqlite3GlobalConfig.m.xRealloc(pOld, nNew);
- }
- assert( EIGHT_BYTE_ALIGNMENT(pNew) ); /* IMP: R-11148-40995 */
- return pNew;
-}
-
-/*
-** The public interface to sqlite3Realloc. Make sure that the memory
-** subsystem is initialized prior to invoking sqliteRealloc.
-*/
-SQLITE_API void *SQLITE_STDCALL sqlite3_realloc(void *pOld, int n){
-#ifndef SQLITE_OMIT_AUTOINIT
- if( sqlite3_initialize() ) return 0;
-#endif
- if( n<0 ) n = 0; /* IMP: R-26507-47431 */
- return sqlite3Realloc(pOld, n);
-}
-SQLITE_API void *SQLITE_STDCALL sqlite3_realloc64(void *pOld, sqlite3_uint64 n){
-#ifndef SQLITE_OMIT_AUTOINIT
- if( sqlite3_initialize() ) return 0;
-#endif
- return sqlite3Realloc(pOld, n);
-}
-
-
-/*
-** Allocate and zero memory.
-*/
-SQLITE_PRIVATE void *sqlite3MallocZero(u64 n){
- void *p = sqlite3Malloc(n);
- if( p ){
- memset(p, 0, (size_t)n);
- }
- return p;
-}
-
-/*
-** Allocate and zero memory. If the allocation fails, make
-** the mallocFailed flag in the connection pointer.
-*/
-SQLITE_PRIVATE void *sqlite3DbMallocZero(sqlite3 *db, u64 n){
- void *p;
- testcase( db==0 );
- p = sqlite3DbMallocRaw(db, n);
- if( p ) memset(p, 0, (size_t)n);
- return p;
-}
-
-
-/* Finish the work of sqlite3DbMallocRawNN for the unusual and
-** slower case when the allocation cannot be fulfilled using lookaside.
-*/
-static SQLITE_NOINLINE void *dbMallocRawFinish(sqlite3 *db, u64 n){
- void *p;
- assert( db!=0 );
- p = sqlite3Malloc(n);
- if( !p ) sqlite3OomFault(db);
- sqlite3MemdebugSetType(p,
- (db->lookaside.bDisable==0) ? MEMTYPE_LOOKASIDE : MEMTYPE_HEAP);
- return p;
-}
-
-/*
-** Allocate memory, either lookaside (if possible) or heap.
-** If the allocation fails, set the mallocFailed flag in
-** the connection pointer.
-**
-** If db!=0 and db->mallocFailed is true (indicating a prior malloc
-** failure on the same database connection) then always return 0.
-** Hence for a particular database connection, once malloc starts
-** failing, it fails consistently until mallocFailed is reset.
-** This is an important assumption. There are many places in the
-** code that do things like this:
-**
-** int *a = (int*)sqlite3DbMallocRaw(db, 100);
-** int *b = (int*)sqlite3DbMallocRaw(db, 200);
-** if( b ) a[10] = 9;
-**
-** In other words, if a subsequent malloc (ex: "b") worked, it is assumed
-** that all prior mallocs (ex: "a") worked too.
-**
-** The sqlite3MallocRawNN() variant guarantees that the "db" parameter is
-** not a NULL pointer.
-*/
-SQLITE_PRIVATE void *sqlite3DbMallocRaw(sqlite3 *db, u64 n){
- void *p;
- if( db ) return sqlite3DbMallocRawNN(db, n);
- p = sqlite3Malloc(n);
- sqlite3MemdebugSetType(p, MEMTYPE_HEAP);
- return p;
-}
-SQLITE_PRIVATE void *sqlite3DbMallocRawNN(sqlite3 *db, u64 n){
-#ifndef SQLITE_OMIT_LOOKASIDE
- LookasideSlot *pBuf;
- assert( db!=0 );
- assert( sqlite3_mutex_held(db->mutex) );
- assert( db->pnBytesFreed==0 );
- if( db->lookaside.bDisable==0 ){
- assert( db->mallocFailed==0 );
- if( n>db->lookaside.sz ){
- db->lookaside.anStat[1]++;
- }else if( (pBuf = db->lookaside.pFree)==0 ){
- db->lookaside.anStat[2]++;
- }else{
- db->lookaside.pFree = pBuf->pNext;
- db->lookaside.nOut++;
- db->lookaside.anStat[0]++;
- if( db->lookaside.nOut>db->lookaside.mxOut ){
- db->lookaside.mxOut = db->lookaside.nOut;
- }
- return (void*)pBuf;
- }
- }else if( db->mallocFailed ){
- return 0;
- }
-#else
- assert( db!=0 );
- assert( sqlite3_mutex_held(db->mutex) );
- assert( db->pnBytesFreed==0 );
- if( db->mallocFailed ){
- return 0;
- }
-#endif
- return dbMallocRawFinish(db, n);
-}
-
-/* Forward declaration */
-static SQLITE_NOINLINE void *dbReallocFinish(sqlite3 *db, void *p, u64 n);
-
-/*
-** Resize the block of memory pointed to by p to n bytes. If the
-** resize fails, set the mallocFailed flag in the connection object.
-*/
-SQLITE_PRIVATE void *sqlite3DbRealloc(sqlite3 *db, void *p, u64 n){
- assert( db!=0 );
- if( p==0 ) return sqlite3DbMallocRawNN(db, n);
- assert( sqlite3_mutex_held(db->mutex) );
- if( isLookaside(db,p) && n<=db->lookaside.sz ) return p;
- return dbReallocFinish(db, p, n);
-}
-static SQLITE_NOINLINE void *dbReallocFinish(sqlite3 *db, void *p, u64 n){
- void *pNew = 0;
- assert( db!=0 );
- assert( p!=0 );
- if( db->mallocFailed==0 ){
- if( isLookaside(db, p) ){
- pNew = sqlite3DbMallocRawNN(db, n);
- if( pNew ){
- memcpy(pNew, p, db->lookaside.sz);
- sqlite3DbFree(db, p);
- }
- }else{
- assert( sqlite3MemdebugHasType(p, (MEMTYPE_LOOKASIDE|MEMTYPE_HEAP)) );
- assert( sqlite3MemdebugNoType(p, (u8)~(MEMTYPE_LOOKASIDE|MEMTYPE_HEAP)) );
- sqlite3MemdebugSetType(p, MEMTYPE_HEAP);
- pNew = sqlite3_realloc64(p, n);
- if( !pNew ){
- sqlite3OomFault(db);
- }
- sqlite3MemdebugSetType(pNew,
- (db->lookaside.bDisable==0 ? MEMTYPE_LOOKASIDE : MEMTYPE_HEAP));
- }
- }
- return pNew;
-}
-
-/*
-** Attempt to reallocate p. If the reallocation fails, then free p
-** and set the mallocFailed flag in the database connection.
-*/
-SQLITE_PRIVATE void *sqlite3DbReallocOrFree(sqlite3 *db, void *p, u64 n){
- void *pNew;
- pNew = sqlite3DbRealloc(db, p, n);
- if( !pNew ){
- sqlite3DbFree(db, p);
- }
- return pNew;
-}
-
-/*
-** Make a copy of a string in memory obtained from sqliteMalloc(). These
-** functions call sqlite3MallocRaw() directly instead of sqliteMalloc(). This
-** is because when memory debugging is turned on, these two functions are
-** called via macros that record the current file and line number in the
-** ThreadData structure.
-*/
-SQLITE_PRIVATE char *sqlite3DbStrDup(sqlite3 *db, const char *z){
- char *zNew;
- size_t n;
- if( z==0 ){
- return 0;
- }
- n = sqlite3Strlen30(z) + 1;
- assert( (n&0x7fffffff)==n );
- zNew = sqlite3DbMallocRaw(db, (int)n);
- if( zNew ){
- memcpy(zNew, z, n);
- }
- return zNew;
-}
-SQLITE_PRIVATE char *sqlite3DbStrNDup(sqlite3 *db, const char *z, u64 n){
- char *zNew;
- assert( db!=0 );
- if( z==0 ){
- return 0;
- }
- assert( (n&0x7fffffff)==n );
- zNew = sqlite3DbMallocRawNN(db, n+1);
- if( zNew ){
- memcpy(zNew, z, (size_t)n);
- zNew[n] = 0;
- }
- return zNew;
-}
-
-/*
-** Free any prior content in *pz and replace it with a copy of zNew.
-*/
-SQLITE_PRIVATE void sqlite3SetString(char **pz, sqlite3 *db, const char *zNew){
- sqlite3DbFree(db, *pz);
- *pz = sqlite3DbStrDup(db, zNew);
-}
-
-/*
-** Call this routine to record the fact that an OOM (out-of-memory) error
-** has happened. This routine will set db->mallocFailed, and also
-** temporarily disable the lookaside memory allocator and interrupt
-** any running VDBEs.
-*/
-SQLITE_PRIVATE void sqlite3OomFault(sqlite3 *db){
- if( db->mallocFailed==0 && db->bBenignMalloc==0 ){
- db->mallocFailed = 1;
- if( db->nVdbeExec>0 ){
- db->u1.isInterrupted = 1;
- }
- db->lookaside.bDisable++;
- }
-}
-
-/*
-** This routine reactivates the memory allocator and clears the
-** db->mallocFailed flag as necessary.
-**
-** The memory allocator is not restarted if there are running
-** VDBEs.
-*/
-SQLITE_PRIVATE void sqlite3OomClear(sqlite3 *db){
- if( db->mallocFailed && db->nVdbeExec==0 ){
- db->mallocFailed = 0;
- db->u1.isInterrupted = 0;
- assert( db->lookaside.bDisable>0 );
- db->lookaside.bDisable--;
- }
-}
-
-/*
-** Take actions at the end of an API call to indicate an OOM error
-*/
-static SQLITE_NOINLINE int apiOomError(sqlite3 *db){
- sqlite3OomClear(db);
- sqlite3Error(db, SQLITE_NOMEM);
- return SQLITE_NOMEM_BKPT;
-}
-
-/*
-** This function must be called before exiting any API function (i.e.
-** returning control to the user) that has called sqlite3_malloc or
-** sqlite3_realloc.
-**
-** The returned value is normally a copy of the second argument to this
-** function. However, if a malloc() failure has occurred since the previous
-** invocation SQLITE_NOMEM is returned instead.
-**
-** If an OOM as occurred, then the connection error-code (the value
-** returned by sqlite3_errcode()) is set to SQLITE_NOMEM.
-*/
-SQLITE_PRIVATE int sqlite3ApiExit(sqlite3* db, int rc){
- /* If the db handle must hold the connection handle mutex here.
- ** Otherwise the read (and possible write) of db->mallocFailed
- ** is unsafe, as is the call to sqlite3Error().
- */
- assert( db!=0 );
- assert( sqlite3_mutex_held(db->mutex) );
- if( db->mallocFailed || rc==SQLITE_IOERR_NOMEM ){
- return apiOomError(db);
- }
- return rc & db->errMask;
-}
-
-/************** End of malloc.c **********************************************/
-/************** Begin file printf.c ******************************************/
-/*
-** The "printf" code that follows dates from the 1980's. It is in
-** the public domain.
-**
-**************************************************************************
-**
-** This file contains code for a set of "printf"-like routines. These
-** routines format strings much like the printf() from the standard C
-** library, though the implementation here has enhancements to support
-** SQLite.
-*/
-/* #include "sqliteInt.h" */
-
-/*
-** Conversion types fall into various categories as defined by the
-** following enumeration.
-*/
-#define etRADIX 0 /* Integer types. %d, %x, %o, and so forth */
-#define etFLOAT 1 /* Floating point. %f */
-#define etEXP 2 /* Exponentional notation. %e and %E */
-#define etGENERIC 3 /* Floating or exponential, depending on exponent. %g */
-#define etSIZE 4 /* Return number of characters processed so far. %n */
-#define etSTRING 5 /* Strings. %s */
-#define etDYNSTRING 6 /* Dynamically allocated strings. %z */
-#define etPERCENT 7 /* Percent symbol. %% */
-#define etCHARX 8 /* Characters. %c */
-/* The rest are extensions, not normally found in printf() */
-#define etSQLESCAPE 9 /* Strings with '\'' doubled. %q */
-#define etSQLESCAPE2 10 /* Strings with '\'' doubled and enclosed in '',
- NULL pointers replaced by SQL NULL. %Q */
-#define etTOKEN 11 /* a pointer to a Token structure */
-#define etSRCLIST 12 /* a pointer to a SrcList */
-#define etPOINTER 13 /* The %p conversion */
-#define etSQLESCAPE3 14 /* %w -> Strings with '\"' doubled */
-#define etORDINAL 15 /* %r -> 1st, 2nd, 3rd, 4th, etc. English only */
-
-#define etINVALID 16 /* Any unrecognized conversion type */
-
-
-/*
-** An "etByte" is an 8-bit unsigned value.
-*/
-typedef unsigned char etByte;
-
-/*
-** Each builtin conversion character (ex: the 'd' in "%d") is described
-** by an instance of the following structure
-*/
-typedef struct et_info { /* Information about each format field */
- char fmttype; /* The format field code letter */
- etByte base; /* The base for radix conversion */
- etByte flags; /* One or more of FLAG_ constants below */
- etByte type; /* Conversion paradigm */
- etByte charset; /* Offset into aDigits[] of the digits string */
- etByte prefix; /* Offset into aPrefix[] of the prefix string */
-} et_info;
-
-/*
-** Allowed values for et_info.flags
-*/
-#define FLAG_SIGNED 1 /* True if the value to convert is signed */
-#define FLAG_INTERN 2 /* True if for internal use only */
-#define FLAG_STRING 4 /* Allow infinity precision */
-
-
-/*
-** The following table is searched linearly, so it is good to put the
-** most frequently used conversion types first.
-*/
-static const char aDigits[] = "0123456789ABCDEF0123456789abcdef";
-static const char aPrefix[] = "-x0\000X0";
-static const et_info fmtinfo[] = {
- { 'd', 10, 1, etRADIX, 0, 0 },
- { 's', 0, 4, etSTRING, 0, 0 },
- { 'g', 0, 1, etGENERIC, 30, 0 },
- { 'z', 0, 4, etDYNSTRING, 0, 0 },
- { 'q', 0, 4, etSQLESCAPE, 0, 0 },
- { 'Q', 0, 4, etSQLESCAPE2, 0, 0 },
- { 'w', 0, 4, etSQLESCAPE3, 0, 0 },
- { 'c', 0, 0, etCHARX, 0, 0 },
- { 'o', 8, 0, etRADIX, 0, 2 },
- { 'u', 10, 0, etRADIX, 0, 0 },
- { 'x', 16, 0, etRADIX, 16, 1 },
- { 'X', 16, 0, etRADIX, 0, 4 },
-#ifndef SQLITE_OMIT_FLOATING_POINT
- { 'f', 0, 1, etFLOAT, 0, 0 },
- { 'e', 0, 1, etEXP, 30, 0 },
- { 'E', 0, 1, etEXP, 14, 0 },
- { 'G', 0, 1, etGENERIC, 14, 0 },
-#endif
- { 'i', 10, 1, etRADIX, 0, 0 },
- { 'n', 0, 0, etSIZE, 0, 0 },
- { '%', 0, 0, etPERCENT, 0, 0 },
- { 'p', 16, 0, etPOINTER, 0, 1 },
-
-/* All the rest have the FLAG_INTERN bit set and are thus for internal
-** use only */
- { 'T', 0, 2, etTOKEN, 0, 0 },
- { 'S', 0, 2, etSRCLIST, 0, 0 },
- { 'r', 10, 3, etORDINAL, 0, 0 },
-};
-
-/*
-** If SQLITE_OMIT_FLOATING_POINT is defined, then none of the floating point
-** conversions will work.
-*/
-#ifndef SQLITE_OMIT_FLOATING_POINT
-/*
-** "*val" is a double such that 0.1 <= *val < 10.0
-** Return the ascii code for the leading digit of *val, then
-** multiply "*val" by 10.0 to renormalize.
-**
-** Example:
-** input: *val = 3.14159
-** output: *val = 1.4159 function return = '3'
-**
-** The counter *cnt is incremented each time. After counter exceeds
-** 16 (the number of significant digits in a 64-bit float) '0' is
-** always returned.
-*/
-static char et_getdigit(LONGDOUBLE_TYPE *val, int *cnt){
- int digit;
- LONGDOUBLE_TYPE d;
- if( (*cnt)<=0 ) return '0';
- (*cnt)--;
- digit = (int)*val;
- d = digit;
- digit += '0';
- *val = (*val - d)*10.0;
- return (char)digit;
-}
-#endif /* SQLITE_OMIT_FLOATING_POINT */
-
-/*
-** Set the StrAccum object to an error mode.
-*/
-static void setStrAccumError(StrAccum *p, u8 eError){
- assert( eError==STRACCUM_NOMEM || eError==STRACCUM_TOOBIG );
- p->accError = eError;
- p->nAlloc = 0;
-}
-
-/*
-** Extra argument values from a PrintfArguments object
-*/
-static sqlite3_int64 getIntArg(PrintfArguments *p){
- if( p->nArg<=p->nUsed ) return 0;
- return sqlite3_value_int64(p->apArg[p->nUsed++]);
-}
-static double getDoubleArg(PrintfArguments *p){
- if( p->nArg<=p->nUsed ) return 0.0;
- return sqlite3_value_double(p->apArg[p->nUsed++]);
-}
-static char *getTextArg(PrintfArguments *p){
- if( p->nArg<=p->nUsed ) return 0;
- return (char*)sqlite3_value_text(p->apArg[p->nUsed++]);
-}
-
-
-/*
-** On machines with a small stack size, you can redefine the
-** SQLITE_PRINT_BUF_SIZE to be something smaller, if desired.
-*/
-#ifndef SQLITE_PRINT_BUF_SIZE
-# define SQLITE_PRINT_BUF_SIZE 70
-#endif
-#define etBUFSIZE SQLITE_PRINT_BUF_SIZE /* Size of the output buffer */
-
-/*
-** Render a string given by "fmt" into the StrAccum object.
-*/
-SQLITE_PRIVATE void sqlite3VXPrintf(
- StrAccum *pAccum, /* Accumulate results here */
- const char *fmt, /* Format string */
- va_list ap /* arguments */
-){
- int c; /* Next character in the format string */
- char *bufpt; /* Pointer to the conversion buffer */
- int precision; /* Precision of the current field */
- int length; /* Length of the field */
- int idx; /* A general purpose loop counter */
- int width; /* Width of the current field */
- etByte flag_leftjustify; /* True if "-" flag is present */
- etByte flag_plussign; /* True if "+" flag is present */
- etByte flag_blanksign; /* True if " " flag is present */
- etByte flag_alternateform; /* True if "#" flag is present */
- etByte flag_altform2; /* True if "!" flag is present */
- etByte flag_zeropad; /* True if field width constant starts with zero */
- etByte flag_long; /* True if "l" flag is present */
- etByte flag_longlong; /* True if the "ll" flag is present */
- etByte done; /* Loop termination flag */
- etByte xtype = etINVALID; /* Conversion paradigm */
- u8 bArgList; /* True for SQLITE_PRINTF_SQLFUNC */
- u8 useIntern; /* Ok to use internal conversions (ex: %T) */
- char prefix; /* Prefix character. "+" or "-" or " " or '\0'. */
- sqlite_uint64 longvalue; /* Value for integer types */
- LONGDOUBLE_TYPE realvalue; /* Value for real types */
- const et_info *infop; /* Pointer to the appropriate info structure */
- char *zOut; /* Rendering buffer */
- int nOut; /* Size of the rendering buffer */
- char *zExtra = 0; /* Malloced memory used by some conversion */
-#ifndef SQLITE_OMIT_FLOATING_POINT
- int exp, e2; /* exponent of real numbers */
- int nsd; /* Number of significant digits returned */
- double rounder; /* Used for rounding floating point values */
- etByte flag_dp; /* True if decimal point should be shown */
- etByte flag_rtz; /* True if trailing zeros should be removed */
-#endif
- PrintfArguments *pArgList = 0; /* Arguments for SQLITE_PRINTF_SQLFUNC */
- char buf[etBUFSIZE]; /* Conversion buffer */
-
- bufpt = 0;
- if( pAccum->printfFlags ){
- if( (bArgList = (pAccum->printfFlags & SQLITE_PRINTF_SQLFUNC))!=0 ){
- pArgList = va_arg(ap, PrintfArguments*);
- }
- useIntern = pAccum->printfFlags & SQLITE_PRINTF_INTERNAL;
- }else{
- bArgList = useIntern = 0;
- }
- for(; (c=(*fmt))!=0; ++fmt){
- if( c!='%' ){
- bufpt = (char *)fmt;
-#if HAVE_STRCHRNUL
- fmt = strchrnul(fmt, '%');
-#else
- do{ fmt++; }while( *fmt && *fmt != '%' );
-#endif
- sqlite3StrAccumAppend(pAccum, bufpt, (int)(fmt - bufpt));
- if( *fmt==0 ) break;
- }
- if( (c=(*++fmt))==0 ){
- sqlite3StrAccumAppend(pAccum, "%", 1);
- break;
- }
- /* Find out what flags are present */
- flag_leftjustify = flag_plussign = flag_blanksign =
- flag_alternateform = flag_altform2 = flag_zeropad = 0;
- done = 0;
- do{
- switch( c ){
- case '-': flag_leftjustify = 1; break;
- case '+': flag_plussign = 1; break;
- case ' ': flag_blanksign = 1; break;
- case '#': flag_alternateform = 1; break;
- case '!': flag_altform2 = 1; break;
- case '0': flag_zeropad = 1; break;
- default: done = 1; break;
- }
- }while( !done && (c=(*++fmt))!=0 );
- /* Get the field width */
- if( c=='*' ){
- if( bArgList ){
- width = (int)getIntArg(pArgList);
- }else{
- width = va_arg(ap,int);
- }
- if( width<0 ){
- flag_leftjustify = 1;
- width = width >= -2147483647 ? -width : 0;
- }
- c = *++fmt;
- }else{
- unsigned wx = 0;
- while( c>='0' && c<='9' ){
- wx = wx*10 + c - '0';
- c = *++fmt;
- }
- testcase( wx>0x7fffffff );
- width = wx & 0x7fffffff;
- }
- assert( width>=0 );
-#ifdef SQLITE_PRINTF_PRECISION_LIMIT
- if( width>SQLITE_PRINTF_PRECISION_LIMIT ){
- width = SQLITE_PRINTF_PRECISION_LIMIT;
- }
-#endif
-
- /* Get the precision */
- if( c=='.' ){
- c = *++fmt;
- if( c=='*' ){
- if( bArgList ){
- precision = (int)getIntArg(pArgList);
- }else{
- precision = va_arg(ap,int);
- }
- c = *++fmt;
- if( precision<0 ){
- precision = precision >= -2147483647 ? -precision : -1;
- }
- }else{
- unsigned px = 0;
- while( c>='0' && c<='9' ){
- px = px*10 + c - '0';
- c = *++fmt;
- }
- testcase( px>0x7fffffff );
- precision = px & 0x7fffffff;
- }
- }else{
- precision = -1;
- }
- assert( precision>=(-1) );
-#ifdef SQLITE_PRINTF_PRECISION_LIMIT
- if( precision>SQLITE_PRINTF_PRECISION_LIMIT ){
- precision = SQLITE_PRINTF_PRECISION_LIMIT;
- }
-#endif
-
-
- /* Get the conversion type modifier */
- if( c=='l' ){
- flag_long = 1;
- c = *++fmt;
- if( c=='l' ){
- flag_longlong = 1;
- c = *++fmt;
- }else{
- flag_longlong = 0;
- }
- }else{
- flag_long = flag_longlong = 0;
- }
- /* Fetch the info entry for the field */
- infop = &fmtinfo[0];
- xtype = etINVALID;
- for(idx=0; idx