mirror of https://github.com/matrix-org/go-neb.git

commit 9c83637852 by Mark Haines, 9 years ago
102 changed files with 210722 additions and 0 deletions
    28  .gitignore
    58  README.md
    95  src/github.com/matrix-org/go-neb/api.go
   170  src/github.com/matrix-org/go-neb/clients/clients.go
   208  src/github.com/matrix-org/go-neb/database/db.go
   202  src/github.com/matrix-org/go-neb/database/schema.go
    51  src/github.com/matrix-org/go-neb/database/types.go
    20  src/github.com/matrix-org/go-neb/errors/errors.go
    35  src/github.com/matrix-org/go-neb/goneb.go
   301  src/github.com/matrix-org/go-neb/matrix/matrix.go
    40  src/github.com/matrix-org/go-neb/matrix/responses.go
   116  src/github.com/matrix-org/go-neb/matrix/types.go
    61  src/github.com/matrix-org/go-neb/matrix/worker.go
   169  src/github.com/matrix-org/go-neb/plugin/plugin.go
   159  src/github.com/matrix-org/go-neb/plugin/plugin_test.go
   105  src/github.com/matrix-org/go-neb/server/server.go
    37  src/github.com/matrix-org/go-neb/services/echo/echo.go
    35  vendor/manifest
    66  vendor/src/github.com/Sirupsen/logrus/CHANGELOG.md
    21  vendor/src/github.com/Sirupsen/logrus/LICENSE
   402  vendor/src/github.com/Sirupsen/logrus/README.md
    64  vendor/src/github.com/Sirupsen/logrus/alt_exit.go
    74  vendor/src/github.com/Sirupsen/logrus/alt_exit_test.go
    26  vendor/src/github.com/Sirupsen/logrus/doc.go
   264  vendor/src/github.com/Sirupsen/logrus/entry.go
    77  vendor/src/github.com/Sirupsen/logrus/entry_test.go
    50  vendor/src/github.com/Sirupsen/logrus/examples/basic/basic.go
    30  vendor/src/github.com/Sirupsen/logrus/examples/hook/hook.go
   193  vendor/src/github.com/Sirupsen/logrus/exported.go
    45  vendor/src/github.com/Sirupsen/logrus/formatter.go
    98  vendor/src/github.com/Sirupsen/logrus/formatter_bench_test.go
   122  vendor/src/github.com/Sirupsen/logrus/hook_test.go
    34  vendor/src/github.com/Sirupsen/logrus/hooks.go
    39  vendor/src/github.com/Sirupsen/logrus/hooks/syslog/README.md
    54  vendor/src/github.com/Sirupsen/logrus/hooks/syslog/syslog.go
    26  vendor/src/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go
    67  vendor/src/github.com/Sirupsen/logrus/hooks/test/test.go
    39  vendor/src/github.com/Sirupsen/logrus/hooks/test/test_test.go
    41  vendor/src/github.com/Sirupsen/logrus/json_formatter.go
   120  vendor/src/github.com/Sirupsen/logrus/json_formatter_test.go
   212  vendor/src/github.com/Sirupsen/logrus/logger.go
   143  vendor/src/github.com/Sirupsen/logrus/logrus.go
   361  vendor/src/github.com/Sirupsen/logrus/logrus_test.go
     9  vendor/src/github.com/Sirupsen/logrus/terminal_bsd.go
    12  vendor/src/github.com/Sirupsen/logrus/terminal_linux.go
    21  vendor/src/github.com/Sirupsen/logrus/terminal_notwindows.go
    15  vendor/src/github.com/Sirupsen/logrus/terminal_solaris.go
    27  vendor/src/github.com/Sirupsen/logrus/terminal_windows.go
   161  vendor/src/github.com/Sirupsen/logrus/text_formatter.go
    61  vendor/src/github.com/Sirupsen/logrus/text_formatter_test.go
    53  vendor/src/github.com/Sirupsen/logrus/writer.go
    47  vendor/src/github.com/mattn/go-shellwords/README.md
   134  vendor/src/github.com/mattn/go-shellwords/shellwords.go
   132  vendor/src/github.com/mattn/go-shellwords/shellwords_test.go
    19  vendor/src/github.com/mattn/go-shellwords/util_posix.go
    17  vendor/src/github.com/mattn/go-shellwords/util_windows.go
    21  vendor/src/github.com/mattn/go-sqlite3/LICENSE
    81  vendor/src/github.com/mattn/go-sqlite3/README.md
   133  vendor/src/github.com/mattn/go-sqlite3/_example/custom_func/main.go
    71  vendor/src/github.com/mattn/go-sqlite3/_example/hook/hook.go
    22  vendor/src/github.com/mattn/go-sqlite3/_example/mod_regexp/Makefile
    43  vendor/src/github.com/mattn/go-sqlite3/_example/mod_regexp/extension.go
    31  vendor/src/github.com/mattn/go-sqlite3/_example/mod_regexp/sqlite3_mod_regexp.c
    24  vendor/src/github.com/mattn/go-sqlite3/_example/mod_vtable/Makefile
    36  vendor/src/github.com/mattn/go-sqlite3/_example/mod_vtable/extension.go
  1040  vendor/src/github.com/mattn/go-sqlite3/_example/mod_vtable/picojson.h
   238  vendor/src/github.com/mattn/go-sqlite3/_example/mod_vtable/sqlite3_mod_vtable.cc
   106  vendor/src/github.com/mattn/go-sqlite3/_example/simple/simple.go
    74  vendor/src/github.com/mattn/go-sqlite3/backup.go
   336  vendor/src/github.com/mattn/go-sqlite3/callback.go
    97  vendor/src/github.com/mattn/go-sqlite3/callback_test.go
   112  vendor/src/github.com/mattn/go-sqlite3/doc.go
   128  vendor/src/github.com/mattn/go-sqlite3/error.go
   242  vendor/src/github.com/mattn/go-sqlite3/error_test.go
189319  vendor/src/github.com/mattn/go-sqlite3/sqlite3-binding.c
  8733  vendor/src/github.com/mattn/go-sqlite3/sqlite3-binding.h
  1012  vendor/src/github.com/mattn/go-sqlite3/sqlite3.go
   130  vendor/src/github.com/mattn/go-sqlite3/sqlite3_fts3_test.go
    13  vendor/src/github.com/mattn/go-sqlite3/sqlite3_fts5.go
    13  vendor/src/github.com/mattn/go-sqlite3/sqlite3_icu.go
    12  vendor/src/github.com/mattn/go-sqlite3/sqlite3_json1.go
    14  vendor/src/github.com/mattn/go-sqlite3/sqlite3_libsqlite3.go
    63  vendor/src/github.com/mattn/go-sqlite3/sqlite3_load_extension.go
    23  vendor/src/github.com/mattn/go-sqlite3/sqlite3_omit_load_extension.go
    13  vendor/src/github.com/mattn/go-sqlite3/sqlite3_other.go
  1350  vendor/src/github.com/mattn/go-sqlite3/sqlite3_test.go
   409  vendor/src/github.com/mattn/go-sqlite3/sqlite3_test/sqltest.go
    14  vendor/src/github.com/mattn/go-sqlite3/sqlite3_windows.go
   546  vendor/src/github.com/mattn/go-sqlite3/sqlite3ext.h
    27  vendor/src/gopkg.in/airbrake/gobrake.v2/LICENSE
    47  vendor/src/gopkg.in/airbrake/gobrake.v2/README.md
    37  vendor/src/gopkg.in/airbrake/gobrake.v2/bench_test.go
    14  vendor/src/gopkg.in/airbrake/gobrake.v2/circle.yml
    16  vendor/src/gopkg.in/airbrake/gobrake.v2/gobrake.go
    78  vendor/src/gopkg.in/airbrake/gobrake.v2/notice.go
   238  vendor/src/gopkg.in/airbrake/gobrake.v2/notifier.go
   136  vendor/src/gopkg.in/airbrake/gobrake.v2/notifier_test.go
    59  vendor/src/gopkg.in/airbrake/gobrake.v2/util.go
    21  vendor/src/gopkg.in/gemnasium/logrus-airbrake-hook.v2/LICENSE
    33  vendor/src/gopkg.in/gemnasium/logrus-airbrake-hook.v2/README.md

.gitignore
@@ -0,0 +1,28 @@
.*.swp

# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
bin
pkg
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe
*.test
*.prof

README.md
@@ -0,0 +1,58 @@
# Running goneb

Goneb uses environment variables to configure its database and bind address.
To run goneb:

    BIND_ADDRESS=:4050 DATABASE_TYPE=sqlite3 DATABASE_URL=goneb.db bin/goneb

Goneb needs to connect as a matrix user to receive messages. Goneb can listen
for messages as multiple matrix users. The users are configured using an
HTTP API and the config is stored in the database. Goneb will automatically
start syncing matrix messages when the user is configured. To create a user:

    curl -X POST localhost:4050/admin/configureClient --data-binary '{
        "UserID": "@goneb:localhost:8448",
        "HomeserverURL": "http://localhost:8008",
        "AccessToken": "<access_token>"
    }'
    {
        "OldClient": {},
        "NewClient": {
            "UserID": "@goneb:localhost:8448",
            "HomeserverURL": "http://localhost:8008",
            "AccessToken": "<access_token>"
        }
    }

Services in goneb listen for messages in particular rooms using a given matrix
user. Services are configured using an HTTP API and the config is stored in the
database. Services use one of the matrix users configured on goneb to receive
matrix messages. Each service is configured to listen for messages in a set
of rooms. Goneb will automatically join the service to its rooms when it is
configured. To start a service:

    curl -X POST localhost:4050/admin/configureService --data-binary '{
        "Type": "echo",
        "Id": "myserviceid",
        "Config": {
            "UserID": "@goneb:localhost:8448",
            "Rooms": ["!QkdpvTwGlrptdeViJx:localhost:8448"]
        }
    }'
    {
        "Type": "echo",
        "Id": "myserviceid",
        "OldConfig": {},
        "NewConfig": {
            "UserID": "@goneb:localhost:8448",
            "Rooms": ["!QkdpvTwGlrptdeViJx:localhost:8448"]
        }
    }

Goneb has a heartbeat listener that returns 200 OK so that load balancers can
check that the server is still running.

    curl -X GET localhost:4050/test

    {}

src/github.com/matrix-org/go-neb/api.go
@@ -0,0 +1,95 @@
package main

import (
	"encoding/json"
	"github.com/matrix-org/go-neb/errors"
	"github.com/matrix-org/go-neb/clients"
	"github.com/matrix-org/go-neb/database"
	"net/http"
)

type heartbeatHandler struct{}

func (*heartbeatHandler) OnIncomingRequest(req *http.Request) (interface{}, *errors.HTTPError) {
	return &struct{}{}, nil
}

type configureClientHandler struct {
	db      *database.ServiceDB
	clients *clients.Clients
}

func (s *configureClientHandler) OnIncomingRequest(req *http.Request) (interface{}, *errors.HTTPError) {
	if req.Method != "POST" {
		return nil, &errors.HTTPError{nil, "Unsupported Method", 405}
	}

	var body database.ClientConfig
	if err := json.NewDecoder(req.Body).Decode(&body); err != nil {
		return nil, &errors.HTTPError{err, "Error parsing request JSON", 400}
	}

	if err := body.Check(); err != nil {
		return nil, &errors.HTTPError{err, "Error parsing client config", 400}
	}

	oldClient, err := s.clients.Update(body)
	if err != nil {
		return nil, &errors.HTTPError{err, "Error storing token", 500}
	}

	return &struct {
		OldClient database.ClientConfig
		NewClient database.ClientConfig
	}{oldClient, body}, nil
}

type configureServiceHandler struct {
	db      *database.ServiceDB
	clients *clients.Clients
}

func (s *configureServiceHandler) OnIncomingRequest(req *http.Request) (interface{}, *errors.HTTPError) {
	if req.Method != "POST" {
		return nil, &errors.HTTPError{nil, "Unsupported Method", 405}
	}

	var body struct {
		ID     string
		Type   string
		Config json.RawMessage
	}
	if err := json.NewDecoder(req.Body).Decode(&body); err != nil {
		return nil, &errors.HTTPError{err, "Error parsing request JSON", 400}
	}

	if body.ID == "" || body.Type == "" || body.Config == nil {
		return nil, &errors.HTTPError{nil, `Must supply a "ID", a "Type" and a "Config"`, 400}
	}

	service := database.CreateService(body.ID, body.Type)
	if service == nil {
		return nil, &errors.HTTPError{nil, "Unknown service type", 400}
	}

	if err := json.Unmarshal(body.Config, service); err != nil {
		return nil, &errors.HTTPError{err, "Error parsing config JSON", 400}
	}

	client, err := s.clients.Client(service.ServiceUserID())
	if err != nil {
		return nil, &errors.HTTPError{err, "Unknown matrix client", 400}
	}

	oldService, err := s.db.StoreService(service, client)
	if err != nil {
		return nil, &errors.HTTPError{err, "Error storing service", 500}
	}

	return &struct {
		ID        string
		Type      string
		OldConfig database.Service
		NewConfig database.Service
	}{body.ID, body.Type, oldService, service}, nil
}

src/github.com/matrix-org/go-neb/clients/clients.go
@@ -0,0 +1,170 @@
package clients

import (
	log "github.com/Sirupsen/logrus"
	"github.com/matrix-org/go-neb/database"
	"github.com/matrix-org/go-neb/matrix"
	"github.com/matrix-org/go-neb/plugin"
	"net/url"
	"sync"
)

// A Clients is a collection of clients used for bot services.
type Clients struct {
	db       *database.ServiceDB
	dbMutex  sync.Mutex
	mapMutex sync.Mutex
	clients  map[string]clientEntry
}

// Make a new collection of matrix clients
func Make(db *database.ServiceDB) *Clients {
	clients := &Clients{
		db:      db,
		clients: make(map[string]clientEntry),
	}
	return clients
}

// Client gets a client for the userID
func (c *Clients) Client(userID string) (*matrix.Client, error) {
	entry := c.getClient(userID)
	if entry.client != nil {
		return entry.client, nil
	}
	entry, err := c.loadClientFromDB(userID)
	return entry.client, err
}

// Update updates the config for a matrix client
func (c *Clients) Update(config database.ClientConfig) (database.ClientConfig, error) {
	_, old, err := c.updateClientInDB(config)
	return old.config, err
}

// Start the clients in the database and join them to the rooms.
func (c *Clients) Start() error {
	userIDsToRooms, err := c.db.LoadServiceUserIds()
	if err != nil {
		return err
	}
	for userID, roomIDs := range userIDsToRooms {
		client, err := c.Client(userID)
		if err != nil {
			log.WithFields(log.Fields{
				log.ErrorKey:      err,
				"service_user_id": userID,
			}).Warn("Error loading matrix client")
			return err
		}
		for _, roomID := range roomIDs {
			_, err := client.JoinRoom(roomID, "")
			if err != nil {
				return err
			}
		}
	}
	return nil
}

type clientEntry struct {
	config database.ClientConfig
	client *matrix.Client
}

func (c *Clients) getClient(userID string) clientEntry {
	c.mapMutex.Lock()
	defer c.mapMutex.Unlock()
	return c.clients[userID]
}

func (c *Clients) setClient(client clientEntry) {
	c.mapMutex.Lock()
	defer c.mapMutex.Unlock()
	c.clients[client.config.UserID] = client
}

func (c *Clients) loadClientFromDB(userID string) (entry clientEntry, err error) {
	c.dbMutex.Lock()
	defer c.dbMutex.Unlock()

	entry = c.getClient(userID)
	if entry.client != nil {
		return
	}

	if entry.config, err = c.db.LoadMatrixClientConfig(userID); err != nil {
		return
	}

	if entry.client, err = c.newClient(entry.config); err != nil {
		return
	}

	c.setClient(entry)
	return
}

func (c *Clients) updateClientInDB(newConfig database.ClientConfig) (new clientEntry, old clientEntry, err error) {
	c.dbMutex.Lock()
	defer c.dbMutex.Unlock()

	old = c.getClient(newConfig.UserID)
	if old.client != nil && old.config == newConfig {
		// Already have a client with that config.
		new = old
		return
	}

	new.config = newConfig

	if new.client, err = c.newClient(new.config); err != nil {
		return
	}

	if old.config, err = c.db.StoreMatrixClientConfig(new.config); err != nil {
		new.client.StopSync()
		return
	}

	if old.client != nil {
		old.client.StopSync()
		return
	}

	c.setClient(new)
	return
}

func (c *Clients) newClient(config database.ClientConfig) (*matrix.Client, error) {

	homeserverURL, err := url.Parse(config.HomeserverURL)
	if err != nil {
		return nil, err
	}

	client := matrix.NewClient(homeserverURL, config.AccessToken, config.UserID)

	// TODO: Check that the access token is valid for the userID by performing
	// a request against the server.

	client.Worker.OnEventType("m.room.message", func(event *matrix.Event) {
		services, err := c.db.LoadServicesInRoom(client.UserID, event.RoomID)
		if err != nil {
			log.WithFields(log.Fields{
				log.ErrorKey:      err,
				"room_id":         event.RoomID,
				"service_user_id": client.UserID,
			}).Warn("Error loading services")
		}
		var plugins []plugin.Plugin
		for _, service := range services {
			plugins = append(plugins, service.Plugin(event.RoomID))
		}
		plugin.OnMessage(plugins, client, event)
	})

	go client.Sync()

	return client, nil
}

src/github.com/matrix-org/go-neb/database/db.go
@@ -0,0 +1,208 @@
package database

import (
	"database/sql"
	"github.com/matrix-org/go-neb/matrix"
	"sort"
	"time"
)

// A ServiceDB stores the configuration for the services
type ServiceDB struct {
	db *sql.DB
}

// Open a SQL database to use as a ServiceDB. This will automatically create
// the necessary database tables if they aren't already present.
func Open(databaseType, databaseURL string) (serviceDB *ServiceDB, err error) {
	db, err := sql.Open(databaseType, databaseURL)
	if err != nil {
		return
	}
	if _, err = db.Exec(schemaSQL); err != nil {
		return
	}
	serviceDB = &ServiceDB{db: db}
	return
}

// StoreMatrixClientConfig stores the Matrix client config for a bot service.
// If a config already exists then it will be updated, otherwise a new config
// will be inserted. The previous config is returned.
func (d *ServiceDB) StoreMatrixClientConfig(config ClientConfig) (oldConfig ClientConfig, err error) {
	err = runTransaction(d.db, func(txn *sql.Tx) error {
		oldConfig, err = selectMatrixClientConfigTxn(txn, config.UserID)
		now := time.Now()
		if err == nil {
			return updateMatrixClientConfigTxn(txn, now, config)
		} else if err == sql.ErrNoRows {
			return insertMatrixClientConfigTxn(txn, now, config)
		} else {
			return err
		}
	})
	return
}

// LoadServiceUserIds loads the user ids used by the bots in the database and
// the rooms those bots should be joined to.
func (d *ServiceDB) LoadServiceUserIds() (userIDsToRooms map[string][]string, err error) {
	err = runTransaction(d.db, func(txn *sql.Tx) error {
		userIDsToRooms, err = selectServiceUserIDsTxn(txn)
		return err
	})
	return
}

// LoadMatrixClientConfig loads a Matrix client config from the database.
// Returns sql.ErrNoRows if the client isn't in the database.
func (d *ServiceDB) LoadMatrixClientConfig(userID string) (config ClientConfig, err error) {
	err = runTransaction(d.db, func(txn *sql.Tx) error {
		config, err = selectMatrixClientConfigTxn(txn, userID)
		return err
	})
	return
}

// LoadService loads a service from the database.
// Returns sql.ErrNoRows if the service isn't in the database.
func (d *ServiceDB) LoadService(serviceID string) (service Service, err error) {
	err = runTransaction(d.db, func(txn *sql.Tx) error {
		service, err = selectServiceTxn(txn, serviceID)
		return err
	})
	return
}

// LoadServicesInRoom loads all the bot services configured for a room.
// Returns the empty list if there aren't any services configured.
func (d *ServiceDB) LoadServicesInRoom(serviceUserID, roomID string) (services []Service, err error) {
	err = runTransaction(d.db, func(txn *sql.Tx) error {
		serviceIDs, err := selectRoomServicesTxn(txn, serviceUserID, roomID)
		if err != nil {
			return err
		}
		for _, serviceID := range serviceIDs {
			service, err := selectServiceTxn(txn, serviceID)
			if err != nil {
				return err
			}
			services = append(services, service)
		}
		return nil
	})
	return
}

// StoreService stores a service into the database either by inserting a new
// service or updating an existing service. Returns the old service if there
// was one.
func (d *ServiceDB) StoreService(service Service, client *matrix.Client) (oldService Service, err error) {
	err = runTransaction(d.db, func(txn *sql.Tx) error {
		oldService, err = selectServiceTxn(txn, service.ServiceID())
		if err != nil && err != sql.ErrNoRows {
			return err
		}
		now := time.Now()

		var newRoomIDs []string
		var oldRoomIDs []string

		if oldService == nil {
			if err := insertServiceTxn(txn, now, service); err != nil {
				return err
			}
			newRoomIDs = service.RoomIDs()
		} else {
			if err := updateServiceTxn(txn, now, service); err != nil {
				return err
			}
			if service.ServiceUserID() == oldService.ServiceUserID() {
				oldRoomIDs, newRoomIDs = difference(
					oldService.RoomIDs(), service.RoomIDs(),
				)
			} else {
				oldRoomIDs = oldService.RoomIDs()
				newRoomIDs = service.RoomIDs()
			}
		}

		for _, roomID := range oldRoomIDs {
			if err := deleteRoomServiceTxn(
				txn, oldService.ServiceUserID(), roomID, service.ServiceID(),
			); err != nil {
				return err
			}
			// TODO: Leave the old rooms.
		}

		for _, roomID := range newRoomIDs {
			if err := insertRoomServiceTxn(
				txn, now, service.ServiceUserID(), roomID, service.ServiceID(),
			); err != nil {
				return err
			}

			// TODO: Making HTTP requests inside the database transaction is unfortunate.
			// But it is the easiest way of making sure that the changes we
			// made to the database get rolled back if the requests fail.
			if _, err := client.JoinRoom(roomID, ""); err != nil {
				// TODO: What happens to the rooms that we successfully joined?
				// Should we leave them now?
				return err
			}
		}

		return nil
	})
	return
}

func runTransaction(db *sql.DB, fn func(txn *sql.Tx) error) (err error) {
	txn, err := db.Begin()
	if err != nil {
		return
	}
	defer func() {
		if r := recover(); r != nil {
			txn.Rollback()
			panic(r)
		} else if err != nil {
			txn.Rollback()
		} else {
			err = txn.Commit()
		}
	}()
	err = fn(txn)
	return
}

// difference returns the elements that are only in the first list and
// the elements that are only in the second. As a side-effect this sorts
// the input lists in-place.
func difference(a, b []string) (onlyA, onlyB []string) {
	sort.Strings(a)
	sort.Strings(b)
	for {
		if len(b) == 0 {
			onlyA = append(onlyA, a...)
			return
		}
		if len(a) == 0 {
			onlyB = append(onlyB, b...)
			return
		}
		xA := a[0]
		xB := b[0]
		if xA < xB {
			onlyA = append(onlyA, xA)
			a = a[1:]
		} else if xA > xB {
			onlyB = append(onlyB, xB)
			b = b[1:]
		} else {
			a = a[1:]
			b = b[1:]
		}
	}
}
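
The difference helper above drives the room join/leave bookkeeping in StoreService: it sorts both slices and walks them, returning the elements unique to each side. The following test-style sketch is not part of the commit; it is only a hedged illustration of the documented behaviour, with made-up room IDs, and would live alongside db.go in the database package.

    package database

    import (
    	"reflect"
    	"testing"
    )

    // Hypothetical illustration of difference(); values are invented.
    func TestDifference(t *testing.T) {
    	onlyA, onlyB := difference(
    		[]string{"!roomA:localhost", "!roomB:localhost"},
    		[]string{"!roomB:localhost", "!roomC:localhost"},
    	)
    	// "!roomB" is in both inputs, so only the unshared rooms come back.
    	if !reflect.DeepEqual(onlyA, []string{"!roomA:localhost"}) {
    		t.Errorf("onlyA = %v", onlyA)
    	}
    	if !reflect.DeepEqual(onlyB, []string{"!roomC:localhost"}) {
    		t.Errorf("onlyB = %v", onlyB)
    	}
    }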

src/github.com/matrix-org/go-neb/database/schema.go
@@ -0,0 +1,202 @@
package database

import (
	"database/sql"
	"encoding/json"
	"fmt"
	"time"
)

const schemaSQL = `
CREATE TABLE IF NOT EXISTS services (
	service_id TEXT NOT NULL,
	service_type TEXT NOT NULL,
	service_json TEXT NOT NULL,
	time_added_ms BIGINT NOT NULL,
	time_updated_ms BIGINT NOT NULL,
	UNIQUE(service_id)
);

CREATE TABLE IF NOT EXISTS rooms_to_services (
	service_user_id TEXT NOT NULL,
	room_id TEXT NOT NULL,
	service_id TEXT NOT NULL,
	time_added_ms BIGINT NOT NULL,
	UNIQUE(service_user_id, room_id, service_id)
);

CREATE TABLE IF NOT EXISTS matrix_clients (
	user_id TEXT NOT NULL,
	client_json TEXT NOT NULL,
	next_batch TEXT NOT NULL,
	time_added_ms BIGINT NOT NULL,
	time_updated_ms BIGINT NOT NULL,
	UNIQUE(user_id)
);
`

const selectServiceUserIDsSQL = `
SELECT service_user_id, room_id FROM rooms_to_services
	GROUP BY service_user_id, room_id
`

// selectServiceUserIDsTxn returns a map from userIDs to lists of roomIDs.
func selectServiceUserIDsTxn(txn *sql.Tx) (map[string][]string, error) {
	rows, err := txn.Query(selectServiceUserIDsSQL)
	if err != nil {
		return nil, err
	}
	result := make(map[string][]string)
	for rows.Next() {
		var uID, rID string
		if err = rows.Scan(&uID, &rID); err != nil {
			return nil, err
		}
		result[uID] = append(result[uID], rID)
	}
	return result, nil
}

const selectMatrixClientConfigSQL = `
SELECT client_json FROM matrix_clients WHERE user_id = $1
`

func selectMatrixClientConfigTxn(txn *sql.Tx, userID string) (config ClientConfig, err error) {
	var configJSON []byte
	err = txn.QueryRow(selectMatrixClientConfigSQL, userID).Scan(&configJSON)
	if err != nil {
		return
	}
	err = json.Unmarshal(configJSON, &config)
	return
}

const insertMatrixClientConfigSQL = `
INSERT INTO matrix_clients(
	user_id, client_json, next_batch, time_added_ms, time_updated_ms
) VALUES ($1, $2, '', $3, $4)
`

func insertMatrixClientConfigTxn(txn *sql.Tx, now time.Time, config ClientConfig) error {
	t := now.UnixNano() / 1000000
	configJSON, err := json.Marshal(&config)
	if err != nil {
		return err
	}
	_, err = txn.Exec(insertMatrixClientConfigSQL, config.UserID, configJSON, t, t)
	return err
}

const updateMatrixClientConfigSQL = `
UPDATE matrix_clients SET client_json = $1, time_updated_ms = $2
	WHERE user_id = $3
`

func updateMatrixClientConfigTxn(txn *sql.Tx, now time.Time, config ClientConfig) error {
	t := now.UnixNano() / 1000000
	configJSON, err := json.Marshal(&config)
	if err != nil {
		return err
	}
	_, err = txn.Exec(updateMatrixClientConfigSQL, configJSON, t, config.UserID)
	return err
}

const selectServiceSQL = `
SELECT service_type, service_json FROM services
	WHERE service_id = $1
`

func selectServiceTxn(txn *sql.Tx, serviceID string) (Service, error) {
	var serviceType string
	var serviceJSON []byte
	row := txn.QueryRow(selectServiceSQL, serviceID)
	if err := row.Scan(&serviceType, &serviceJSON); err != nil {
		return nil, err
	}
	service := CreateService(serviceID, serviceType)
	if service == nil {
		return nil, fmt.Errorf("Cannot create services of type %s", serviceType)
	}
	if err := json.Unmarshal(serviceJSON, service); err != nil {
		return nil, err
	}
	return service, nil
}

const updateServiceSQL = `
UPDATE services SET service_type=$1, service_json=$2, time_updated_ms=$3
	WHERE service_id=$4
`

func updateServiceTxn(txn *sql.Tx, now time.Time, service Service) error {
	serviceJSON, err := json.Marshal(service)
	if err != nil {
		return err
	}
	t := now.UnixNano() / 1000000
	_, err = txn.Exec(
		updateServiceSQL, service.ServiceType(), serviceJSON, t,
		service.ServiceID(),
	)
	return err
}

const insertServiceSQL = `
INSERT INTO services(
	service_id, service_type, service_json, time_added_ms, time_updated_ms
) VALUES ($1, $2, $3, $4, $5)
`

func insertServiceTxn(txn *sql.Tx, now time.Time, service Service) error {
	serviceJSON, err := json.Marshal(service)
	if err != nil {
		return err
	}
	t := now.UnixNano() / 1000000
	_, err = txn.Exec(
		insertServiceSQL,
		service.ServiceID(), service.ServiceType(), serviceJSON, t, t,
	)
	return err
}

const insertRoomServiceSQL = `
INSERT INTO rooms_to_services(service_user_id, room_id, service_id, time_added_ms)
	VALUES ($1, $2, $3, $4)
`

func insertRoomServiceTxn(txn *sql.Tx, now time.Time, serviceUserID, roomID, serviceID string) error {
	t := now.UnixNano() / 1000000
	_, err := txn.Exec(insertRoomServiceSQL, serviceUserID, roomID, serviceID, t)
	return err
}

const deleteRoomServiceSQL = `
DELETE FROM rooms_to_services WHERE service_user_id=$1 AND room_id = $2 AND service_id=$3
`

func deleteRoomServiceTxn(txn *sql.Tx, serviceUserID, roomID, serviceID string) error {
	_, err := txn.Exec(deleteRoomServiceSQL, serviceUserID, roomID, serviceID)
	return err
}

const selectRoomServicesSQL = `
SELECT service_id FROM rooms_to_services WHERE service_user_id=$1 AND room_id=$2
`

func selectRoomServicesTxn(txn *sql.Tx, serviceUserID, roomID string) (serviceIDs []string, err error) {
	rows, err := txn.Query(selectRoomServicesSQL, serviceUserID, roomID)
	if err != nil {
		return
	}
	defer rows.Close()
	for rows.Next() {
		var serviceID string
		if err = rows.Scan(&serviceID); err != nil {
			return
		}
		serviceIDs = append(serviceIDs, serviceID)
	}
	return
}

src/github.com/matrix-org/go-neb/database/types.go
@@ -0,0 +1,51 @@
package database

import (
	"errors"
	"github.com/matrix-org/go-neb/plugin"
	"net/url"
)

// A ClientConfig is the configuration for a matrix client for a bot to use.
type ClientConfig struct {
	UserID        string // The matrix UserId to connect with.
	HomeserverURL string // A URL with the host and port of the matrix server. E.g. https://matrix.org:8448
	AccessToken   string // The matrix access token to authenticate the requests with.
}

// Check that the client has the correct fields.
func (c *ClientConfig) Check() error {
	if c.UserID == "" || c.HomeserverURL == "" || c.AccessToken == "" {
		return errors.New(`Must supply a "UserID", a "HomeserverURL", and an "AccessToken"`)
	}
	if _, err := url.Parse(c.HomeserverURL); err != nil {
		return err
	}
	return nil
}

// A Service is the configuration for a bot service.
type Service interface {
	ServiceUserID() string
	ServiceID() string
	ServiceType() string
	RoomIDs() []string
	Plugin(roomID string) plugin.Plugin
}

var servicesByType = map[string]func(string) Service{}

// RegisterService registers a factory for creating Service instances.
func RegisterService(factory func(string) Service) {
	servicesByType[factory("").ServiceType()] = factory
}

// CreateService creates a Service of the given type and serviceID.
// Returns nil if the Service couldn't be created.
func CreateService(serviceID, serviceType string) Service {
	f := servicesByType[serviceType]
	if f == nil {
		return nil
	}
	return f(serviceID)
}
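
database/types.go above defines the Service interface plus the RegisterService/CreateService factory registry that configureService and selectServiceTxn rely on. The echo service added by this commit (src/github.com/matrix-org/go-neb/services/echo/echo.go) is not reproduced in this listing, so the following is only a hedged, hypothetical sketch of the shape a service implementation might take; the package, type, and field names are illustrative and not taken from the commit.

    // Hypothetical sketch of a bot service; not the real echo.go from this commit.
    package example

    import (
    	"github.com/matrix-org/go-neb/database"
    	"github.com/matrix-org/go-neb/plugin"
    )

    type exampleService struct {
    	id     string   // set from the serviceID passed to the factory
    	UserID string   // matrix user the service receives messages as (from the "Config" JSON)
    	Rooms  []string // rooms the service should be joined to (from the "Config" JSON)
    }

    func (s *exampleService) ServiceUserID() string { return s.UserID }
    func (s *exampleService) ServiceID() string     { return s.id }
    func (s *exampleService) ServiceType() string   { return "example" }
    func (s *exampleService) RoomIDs() []string     { return s.Rooms }

    // Plugin returns the commands/expansions this service answers to in a room.
    // A real service would populate Commands and Expansions (see plugin/plugin.go below).
    func (s *exampleService) Plugin(roomID string) plugin.Plugin {
    	return plugin.Plugin{}
    }

    // Registering the factory from init() lets goneb.go enable the service with a
    // blank import, and lets CreateService construct it from its "Type" string.
    func init() {
    	database.RegisterService(func(serviceID string) database.Service {
    		return &exampleService{id: serviceID}
    	})
    }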

src/github.com/matrix-org/go-neb/errors/errors.go
@@ -0,0 +1,20 @@
package errors

import (
	"fmt"
)

// HTTPError is an HTTP error response, which may wrap an underlying native Go error.
type HTTPError struct {
	WrappedError error
	Message      string
	Code         int
}

func (e HTTPError) Error() string {
	var wrappedErrMsg string
	if e.WrappedError != nil {
		wrappedErrMsg = e.WrappedError.Error()
	}
	return fmt.Sprintf("%s: %d: %s", e.Message, e.Code, wrappedErrMsg)
}

src/github.com/matrix-org/go-neb/goneb.go
@@ -0,0 +1,35 @@
package main

import (
	log "github.com/Sirupsen/logrus"
	"github.com/matrix-org/go-neb/clients"
	"github.com/matrix-org/go-neb/database"
	_ "github.com/matrix-org/go-neb/services/echo"
	"github.com/matrix-org/go-neb/server"
	_ "github.com/mattn/go-sqlite3"
	"net/http"
	_ "net/http/pprof"
	"os"
)

func main() {
	bindAddress := os.Getenv("BIND_ADDRESS")
	databaseType := os.Getenv("DATABASE_TYPE")
	databaseURL := os.Getenv("DATABASE_URL")

	db, err := database.Open(databaseType, databaseURL)
	if err != nil {
		log.Panic(err)
	}

	clients := clients.Make(db)
	if err := clients.Start(); err != nil {
		log.Panic(err)
	}

	http.Handle("/test", server.MakeJSONAPI(&heartbeatHandler{}))
	http.Handle("/admin/configureClient", server.MakeJSONAPI(&configureClientHandler{db: db, clients: clients}))
	http.Handle("/admin/configureService", server.MakeJSONAPI(&configureServiceHandler{db: db, clients: clients}))

	http.ListenAndServe(bindAddress, nil)
}

src/github.com/matrix-org/go-neb/matrix/matrix.go
@@ -0,0 +1,301 @@
// Package matrix provides an HTTP client that can interact with a Homeserver via r0 APIs (/sync).
//
// It is NOT safe to access the field (or any sub-fields of) 'Rooms' concurrently. In essence, this
// structure MUST be treated as read-only. The matrix client will update this structure as new events
// arrive from the homeserver.
//
// Internally, the client has 1 goroutine for polling the server, and 1 goroutine for processing data
// returned. The polling goroutine communicates to the processing goroutine by a buffered channel
// which feedback loops if processing takes a while as it will delay more data from being pulled down
// if the buffer gets full. Modification of the 'Rooms' field of the client is done EXCLUSIVELY on the
// processing goroutine.
package matrix

import (
	"bytes"
	"encoding/json"
	log "github.com/Sirupsen/logrus"
	"github.com/matrix-org/go-neb/errors"
	"io/ioutil"
	"net/http"
	"net/url"
	"path"
	"strconv"
	"sync"
	"time"
)

var (
	filterJSON = json.RawMessage(`{"room":{"timeline":{"limit":0}}}`)
)

// Client represents a Matrix client.
type Client struct {
	HomeserverURL *url.URL
	Prefix        string
	UserID        string
	AccessToken   string
	Rooms         map[string]*Room
	Worker        *Worker
	syncingMutex  sync.Mutex
	syncingID     uint32 // Identifies the current Sync. Only one Sync can be active at any given time.
	httpClient    *http.Client
	filterID      string
}

func (cli *Client) buildURL(urlPath ...string) string {
	// copy the URL. Purposefully ignore error as the input is from a valid URL already
	hsURL, _ := url.Parse(cli.HomeserverURL.String())
	parts := []string{hsURL.Path, cli.Prefix}
	parts = append(parts, urlPath...)
	hsURL.Path = path.Join(parts...)
	query := hsURL.Query()
	query.Set("access_token", cli.AccessToken)
	hsURL.RawQuery = query.Encode()
	return hsURL.String()
}

func (cli *Client) buildURLWithQuery(urlPath []string, urlQuery map[string]string) string {
	u, _ := url.Parse(cli.buildURL(urlPath...))
	q := u.Query()
	for k, v := range urlQuery {
		q.Set(k, v)
	}
	u.RawQuery = q.Encode()
	return u.String()
}

// JoinRoom joins the client to a room ID or alias. Returns a room ID.
func (cli *Client) JoinRoom(roomIDorAlias, serverName string) (string, error) {
	var urlPath string
	if serverName != "" {
		urlPath = cli.buildURLWithQuery([]string{"join", roomIDorAlias}, map[string]string{
			"server_name": serverName,
		})
	} else {
		urlPath = cli.buildURL("join", roomIDorAlias)
	}

	resBytes, err := cli.sendJSON("POST", urlPath, `{}`)
	if err != nil {
		return "", err
	}
	var joinRoomResponse joinRoomHTTPResponse
	if err = json.Unmarshal(resBytes, &joinRoomResponse); err != nil {
		return "", err
	}
	return joinRoomResponse.RoomID, nil
}

// SendMessageEvent sends a message event into a room, returning the event_id on success.
// contentJSON should be a pointer to something that can be encoded as JSON using json.Marshal.
func (cli *Client) SendMessageEvent(roomID string, eventType string, contentJSON interface{}) (string, error) {
	txnID := "go" + strconv.FormatInt(time.Now().UnixNano(), 10)
	urlPath := cli.buildURL("rooms", roomID, "send", eventType, txnID)
	resBytes, err := cli.sendJSON("PUT", urlPath, contentJSON)
	if err != nil {
		return "", err
	}
	var sendEventResponse sendEventHTTPResponse
	if err = json.Unmarshal(resBytes, &sendEventResponse); err != nil {
		return "", err
	}
	return sendEventResponse.EventID, nil
}

// SendText sends an m.room.message event into the given room with a msgtype of m.text
func (cli *Client) SendText(roomID, text string) (string, error) {
	return cli.SendMessageEvent(roomID, "m.room.message",
		TextMessage{"m.text", text})
}

// Sync starts syncing with the provided Homeserver. This function will be invoked continually.
// If Sync is called twice then the first sync will be stopped.
func (cli *Client) Sync() {
	// Mark the client as syncing.
	// We will keep syncing until the syncing state changes. Either because
	// Sync is called or StopSync is called.
	syncingID := cli.incrementSyncingID()
	logger := log.WithFields(log.Fields{
		"syncing": syncingID,
		"user_id": cli.UserID,
	})

	// TODO: Store the filter ID and sync token in the database
	filterID, err := cli.createFilter()
	if err != nil {
		logger.WithError(err).Fatal("Failed to create filter")
		// TODO: Maybe do some sort of error handling here?
	}
	cli.filterID = filterID
	logger.WithField("filter", filterID).Print("Got filter ID")
	nextToken := ""

	logger.Print("Starting sync")

	channel := make(chan syncHTTPResponse, 5)

	go func() {
		for response := range channel {
			cli.Worker.onSyncHTTPResponse(response)
		}
	}()
	defer close(channel)

	for {
		// Do a /sync
		syncBytes, err := cli.doSync(30000, nextToken)
		if err != nil {
			logger.WithError(err).Warn("doSync failed")
			time.Sleep(5 * time.Second)
			continue
		}

		// Decode sync response into syncHTTPResponse
		var syncResponse syncHTTPResponse
		if err = json.Unmarshal(syncBytes, &syncResponse); err != nil {
			logger.WithError(err).Warn("Failed to decode sync data")
			time.Sleep(5 * time.Second)
			continue
		}

		// Check that the syncing state hasn't changed
		// Either because we've stopped syncing or another sync has been started.
		// We discard the response from our sync.
		// TODO: Store the next_batch token so that the next sync can resume
		// from where this sync left off.
		if cli.getSyncingID() != syncingID {
			logger.Print("Stopping sync")
			return
		}

		// Update client state
		nextToken = syncResponse.NextBatch
		logger.WithField("next_batch", nextToken).Print("Received sync response")
		channel <- syncResponse
	}
}

func (cli *Client) incrementSyncingID() uint32 {
	cli.syncingMutex.Lock()
	defer cli.syncingMutex.Unlock()
	cli.syncingID++
	return cli.syncingID
}

func (cli *Client) getSyncingID() uint32 {
	cli.syncingMutex.Lock()
	defer cli.syncingMutex.Unlock()
	return cli.syncingID
}

// StopSync stops the ongoing sync started by Sync.
func (cli *Client) StopSync() {
	// Advance the syncing state so that any running Syncs will terminate.
	cli.incrementSyncingID()
}

// This should only be called by the worker goroutine
func (cli *Client) getOrCreateRoom(roomID string) *Room {
	room := cli.Rooms[roomID]
	if room == nil { // create a new Room
		room = NewRoom(roomID)
		cli.Rooms[roomID] = room
	}
	return room
}

func (cli *Client) sendJSON(method string, httpURL string, contentJSON interface{}) ([]byte, error) {
	jsonStr, err := json.Marshal(contentJSON)
	if err != nil {
		return nil, err
	}
	req, err := http.NewRequest(method, httpURL, bytes.NewBuffer(jsonStr))
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", "application/json")
	logger := log.WithFields(log.Fields{
		"method": method,
		"url":    httpURL,
		"json":   string(jsonStr),
	})
	logger.Print("Sending JSON request")
	res, err := cli.httpClient.Do(req)
	if err != nil {
		logger.WithError(err).Warn("Failed to send JSON request")
		return nil, err
	}
	defer res.Body.Close()
	contents, err := ioutil.ReadAll(res.Body)
	if res.StatusCode >= 300 {
		logger.WithFields(log.Fields{
			"code": res.StatusCode,
			"body": string(contents),
		}).Warn("Failed to send JSON request")
		return nil, errors.HTTPError{
			Code:    res.StatusCode,
			Message: "Failed to " + method + " JSON: HTTP " + strconv.Itoa(res.StatusCode),
		}
	}
	if err != nil {
		logger.WithError(err).Warn("Failed to read response")
		return nil, err
	}
	return contents, nil
}

func (cli *Client) createFilter() (string, error) {
	urlPath := cli.buildURL("user", cli.UserID, "filter")
	resBytes, err := cli.sendJSON("POST", urlPath, &filterJSON)
	if err != nil {
		return "", err
	}
	var filterResponse filterHTTPResponse
	if err = json.Unmarshal(resBytes, &filterResponse); err != nil {
		return "", err
	}
	return filterResponse.FilterID, nil
}

func (cli *Client) doSync(timeout int, since string) ([]byte, error) {
	query := map[string]string{
		"timeout": strconv.Itoa(timeout),
	}
	if since != "" {
		query["since"] = since
	}
	if cli.filterID != "" {
		query["filter"] = cli.filterID
	}
	urlPath := cli.buildURLWithQuery([]string{"sync"}, query)
	log.WithFields(log.Fields{
		"since":   since,
		"timeout": timeout,
	}).Print("Syncing")
	res, err := http.Get(urlPath)
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()
	contents, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return nil, err
	}
	return contents, nil
}

// NewClient creates a new Matrix Client ready for syncing
func NewClient(homeserverURL *url.URL, accessToken string, userID string) *Client {
	cli := Client{
		AccessToken:   accessToken,
		HomeserverURL: homeserverURL,
		UserID:        userID,
		Prefix:        "/_matrix/client/r0",
	}
	cli.Worker = newWorker(&cli)
	cli.Rooms = make(map[string]*Room)
	cli.httpClient = &http.Client{}

	return &cli
}

src/github.com/matrix-org/go-neb/matrix/responses.go
@@ -0,0 +1,40 @@
package matrix

type filterHTTPResponse struct {
	FilterID string `json:"filter_id"`
}

type joinRoomHTTPResponse struct {
	RoomID string `json:"room_id"`
}

type sendEventHTTPResponse struct {
	EventID string `json:"event_id"`
}

type syncHTTPResponse struct {
	NextBatch   string `json:"next_batch"`
	AccountData struct {
		Events []Event `json:"events"`
	} `json:"account_data"`
	Presence struct {
		Events []Event `json:"events"`
	} `json:"presence"`
	Rooms struct {
		Join map[string]struct {
			State struct {
				Events []Event `json:"events"`
			} `json:"state"`
			Timeline struct {
				Events    []Event `json:"events"`
				Limited   bool    `json:"limited"`
				PrevBatch string  `json:"prev_batch"`
			} `json:"timeline"`
		} `json:"join"`
		Invite map[string]struct {
			State struct {
				Events []Event
			} `json:"invite_state"`
		} `json:"invite"`
	} `json:"rooms"`
}

src/github.com/matrix-org/go-neb/matrix/types.go
@@ -0,0 +1,116 @@
package matrix

import (
	"html"
	"regexp"
)

// Room represents a single Matrix room.
type Room struct {
	ID       string
	State    map[string]map[string]*Event
	Timeline []Event
}

// UpdateState updates the room's current state with the given Event. This will clobber events based
// on the type/state_key combination.
func (room Room) UpdateState(event *Event) {
	_, exists := room.State[event.Type]
	if !exists {
		room.State[event.Type] = make(map[string]*Event)
	}
	room.State[event.Type][event.StateKey] = event
}

// GetStateEvent returns the state event for the given type/state_key combo, or nil.
func (room Room) GetStateEvent(eventType string, stateKey string) *Event {
	stateEventMap, _ := room.State[eventType]
	event, _ := stateEventMap[stateKey]
	return event
}

// GetMembershipState returns the membership state of the given user ID in this room. If there is
// no entry for this member, 'leave' is returned for consistency with left users.
func (room Room) GetMembershipState(userID string) string {
	state := "leave"
	event := room.GetStateEvent("m.room.member", userID)
	if event != nil {
		membershipState, found := event.Content["membership"]
		if found {
			mState, isString := membershipState.(string)
			if isString {
				state = mState
			}
		}
	}
	return state
}

// NewRoom creates a new Room with the given ID
func NewRoom(roomID string) *Room {
	// Init the State map and return a pointer to the Room
	return &Room{
		ID:    roomID,
		State: make(map[string]map[string]*Event),
	}
}

// Event represents a single Matrix event.
type Event struct {
	StateKey  string                 `json:"state_key"`        // The state key for the event. Only present on State Events.
	Sender    string                 `json:"sender"`           // The user ID of the sender of the event
	Type      string                 `json:"type"`             // The event type
	Timestamp int                    `json:"origin_server_ts"` // The unix timestamp when this message was sent by the origin server
	ID        string                 `json:"event_id"`         // The unique ID of this event
	RoomID    string                 `json:"room_id"`          // The room the event was sent to. May be empty (e.g. for presence)
	Content   map[string]interface{} `json:"content"`          // The JSON content of the event.
}

// Body returns the value of the "body" key in the event content if it is
// present and is a string.
func (event *Event) Body() (body string, ok bool) {
	value, exists := event.Content["body"]
	if !exists {
		return
	}
	body, ok = value.(string)
	return
}

// MessageType returns the value of the "msgtype" key in the event content if
// it is present and is a string.
func (event *Event) MessageType() (msgtype string, ok bool) {
	value, exists := event.Content["msgtype"]
	if !exists {
		return
	}
	msgtype, ok = value.(string)
	return
}

// TextMessage is the contents of a Matrix formatted message event.
type TextMessage struct {
	MsgType string `json:"msgtype"`
	Body    string `json:"body"`
}

// An HTMLMessage is the contents of a Matrix HTML formatted message event.
type HTMLMessage struct {
	Body          string `json:"body"`
	MsgType       string `json:"msgtype"`
	Format        string `json:"format"`
	FormattedBody string `json:"formatted_body"`
}

var htmlRegex = regexp.MustCompile("<[^<]+?>")

// GetHTMLMessage returns an HTMLMessage with the body set to a stripped version of the provided HTML, in addition
// to the provided HTML.
func GetHTMLMessage(msgtype, htmlText string) HTMLMessage {
	return HTMLMessage{
		Body:          html.UnescapeString(htmlRegex.ReplaceAllLiteralString(htmlText, "")),
		MsgType:       msgtype,
		Format:        "org.matrix.custom.html",
		FormattedBody: htmlText,
	}
}

src/github.com/matrix-org/go-neb/matrix/worker.go
@@ -0,0 +1,61 @@
package matrix

// Worker processes incoming events and updates the Matrix client's data structures. It also informs
// any attached listeners of the new events.
type Worker struct {
	client    *Client
	listeners map[string][]OnEventListener // event type to listeners array
}

// OnEventListener can be used with Worker.OnEventType to be informed of incoming events.
type OnEventListener func(*Event)

func newWorker(client *Client) *Worker {
	return &Worker{
		client,
		make(map[string][]OnEventListener),
	}
}

// OnEventType allows callers to be notified when there are new events for the given event type.
// There are no duplicate checks.
func (worker *Worker) OnEventType(eventType string, callback OnEventListener) {
	_, exists := worker.listeners[eventType]
	if !exists {
		worker.listeners[eventType] = []OnEventListener{}
	}
	worker.listeners[eventType] = append(worker.listeners[eventType], callback)
}

func (worker *Worker) notifyListeners(event *Event) {
	listeners, exists := worker.listeners[event.Type]
	if !exists {
		return
	}
	for _, fn := range listeners {
		fn(event)
	}
}

func (worker *Worker) onSyncHTTPResponse(res syncHTTPResponse) {
	for roomID, roomData := range res.Rooms.Join {
		room := worker.client.getOrCreateRoom(roomID)
		for _, event := range roomData.State.Events {
			event.RoomID = roomID
			room.UpdateState(&event)
			worker.notifyListeners(&event)
		}
		for _, event := range roomData.Timeline.Events {
			event.RoomID = roomID
			worker.notifyListeners(&event)
		}
	}
	for roomID, roomData := range res.Rooms.Invite {
		room := worker.client.getOrCreateRoom(roomID)
		for _, event := range roomData.State.Events {
			event.RoomID = roomID
			room.UpdateState(&event)
			worker.notifyListeners(&event)
		}
	}
}
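
Taken together, matrix.go, responses.go, types.go and worker.go above make up the client's public surface: NewClient builds a client, Worker.OnEventType registers listeners, Sync polls /sync and feeds events to the worker, and SendText/SendMessageEvent post messages back. The following is only a minimal, hypothetical usage sketch; the homeserver URL, access token, user ID and reply text are placeholder values, not taken from the commit (go-neb itself wires this up via clients/clients.go above).

    // Hypothetical standalone usage of the matrix package; values are placeholders.
    package main

    import (
    	"net/url"

    	"github.com/matrix-org/go-neb/matrix"
    )

    func main() {
    	hsURL, _ := url.Parse("http://localhost:8008")
    	cli := matrix.NewClient(hsURL, "<access_token>", "@goneb:localhost:8448")

    	// React to incoming room messages via the worker.
    	cli.Worker.OnEventType("m.room.message", func(ev *matrix.Event) {
    		if body, ok := ev.Body(); ok {
    			// SendText posts an m.text m.room.message back into the room.
    			cli.SendText(ev.RoomID, "you said: "+body)
    		}
    	})

    	// Sync blocks, polling /sync and feeding events to the worker goroutine.
    	cli.Sync()
    }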
@@ -0,0 +1,169 @@
package plugin

import (
    log "github.com/Sirupsen/logrus"
    "github.com/matrix-org/go-neb/matrix"
    "github.com/mattn/go-shellwords"
    "regexp"
    "strings"
)

// A Plugin is a list of commands and expansions to apply to incoming messages.
type Plugin struct {
    Commands   []Command
    Expansions []Expansion
}

// A Command is something that a user invokes by sending a message starting with '!'
// followed by a list of strings that name the command, followed by a list of argument
// strings. The argument strings may be quoted using '\"' and '\'' in the same way
// that they are quoted in the unix shell.
type Command struct {
    Path      []string
    Arguments []string
    Help      string
    Command   func(roomID, userID string, arguments []string) (content interface{}, err error)
}

// An Expansion is something that activates when the user sends any message
// containing a string matching a given pattern. For example an RFC expansion
// might expand "RFC 6214" into "Adaptation of RFC 1149 for IPv6" and link to
// the appropriate RFC.
type Expansion struct {
    Regexp *regexp.Regexp
    Expand func(roomID, matchingText string) interface{}
}

// matches if the arguments start with the path of the command.
func (command *Command) matches(arguments []string) bool {
    if len(arguments) < len(command.Path) {
        return false
    }
    for i, segment := range command.Path {
        if segment != arguments[i] {
            return false
        }
    }
    return true
}

// runCommandForPlugin runs a single command read from a matrix event. Runs
// the matching command with the longest path. Returns the JSON encodable
// content of a single matrix message event to use as a response or nil if no
// response is appropriate.
func runCommandForPlugin(plugin Plugin, event *matrix.Event, arguments []string) interface{} {
    var bestMatch *Command
    for i := range plugin.Commands {
        // Take a pointer into the slice so bestMatch doesn't alias the loop variable.
        command := &plugin.Commands[i]
        matches := command.matches(arguments)
        betterMatch := bestMatch == nil || len(bestMatch.Path) < len(command.Path)
        if matches && betterMatch {
            bestMatch = command
        }
    }

    if bestMatch == nil {
        return nil
    }

    cmdArgs := arguments[len(bestMatch.Path):]
    content, err := bestMatch.Command(event.RoomID, event.Sender, cmdArgs)
    if err != nil {
        if content != nil {
            log.WithFields(log.Fields{
                log.ErrorKey: err,
                "room_id":    event.RoomID,
                "user_id":    event.Sender,
                "command":    bestMatch.Path,
                "args":       cmdArgs,
            }).Warn("Command returned both error and content.")
        }
        content = matrix.TextMessage{"m.notice", err.Error()}
    }

    return content
}

// run the expansions for a matrix event.
func runExpansionsForPlugin(plugin Plugin, event *matrix.Event, body string) []interface{} {
    var responses []interface{}

    for _, expansion := range plugin.Expansions {
        matches := map[string]bool{}
        for _, matchingText := range expansion.Regexp.FindAllString(body, -1) {
            if matches[matchingText] {
                // Only expand the first occurrence of a matching string
                continue
            }
            matches[matchingText] = true
            if response := expansion.Expand(event.RoomID, matchingText); response != nil {
                responses = append(responses, response)
            }
        }
    }

    return responses
}

// runCommands runs the plugin commands or expansions for a single matrix
// event. Returns a list of JSON encodable contents for the matrix messages
// to use as responses.
// If the message begins with '!' then it is assumed to be a command. Each
// plugin is checked for a matching command; if a match is found then that
// command is run. If more than one plugin has a matching command then all
// of those commands are run. This shouldn't happen unless the same plugin
// is installed multiple times since each plugin will usually have a
// distinct prefix for its commands.
// If the message doesn't begin with '!' then it is checked against the
// expansions for each plugin.
func runCommands(plugins []Plugin, event *matrix.Event) []interface{} {
    body, ok := event.Body()
    if !ok || body == "" {
        return nil
    }

    // filter m.notice to prevent loops
    if msgtype, ok := event.MessageType(); !ok || msgtype == "m.notice" {
        return nil
    }

    var responses []interface{}

    if body[0] == '!' {
        args, err := shellwords.Parse(body[1:])
        if err != nil {
            args = strings.Split(body[1:], " ")
        }

        for _, plugin := range plugins {
            if response := runCommandForPlugin(plugin, event, args); response != nil {
                responses = append(responses, response)
            }
        }
    } else {
        for _, plugin := range plugins {
            expansions := runExpansionsForPlugin(plugin, event, body)
            responses = append(responses, expansions...)
        }
    }

    return responses
}

// OnMessage checks the message event to see whether it contains any commands
// or expansions from the listed plugins and processes those commands or
// expansions.
func OnMessage(plugins []Plugin, client *matrix.Client, event *matrix.Event) {
    responses := runCommands(plugins, event)

    for _, content := range responses {
        _, err := client.SendMessageEvent(event.RoomID, "m.room.message", content)
        if err != nil {
            log.WithFields(log.Fields{
                log.ErrorKey: err,
                "room_id":    event.RoomID,
                "user_id":    event.Sender,
                "content":    content,
            }).Print("Failed to send command response")
        }
    }
}
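To make the Command and Expansion contracts concrete, here is a hedged sketch of a service assembling a Plugin from the types defined above. The "!time" command and the RFC-mention expansion are invented for illustration; only the plugin and matrix types come from this repository.

```go
package example

import (
    "regexp"
    "time"

    "github.com/matrix-org/go-neb/matrix"
    "github.com/matrix-org/go-neb/plugin"
)

// clockPlugin builds a Plugin with one command and one expansion.
func clockPlugin() plugin.Plugin {
    return plugin.Plugin{
        Commands: []plugin.Command{
            {
                // Invoked by sending "!time" in a room; any extra words arrive in args.
                Path: []string{"time"},
                Command: func(roomID, userID string, args []string) (interface{}, error) {
                    return &matrix.TextMessage{"m.notice", time.Now().UTC().String()}, nil
                },
            },
        },
        Expansions: []plugin.Expansion{
            {
                // Fires for any message containing something like "RFC 6214".
                Regexp: regexp.MustCompile(`RFC \d+`),
                Expand: func(roomID, matchingText string) interface{} {
                    return &matrix.TextMessage{"m.notice", "You mentioned " + matchingText}
                },
            },
        },
    }
}
```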
@@ -0,0 +1,159 @@
package plugin

import (
    "github.com/matrix-org/go-neb/matrix"
    "reflect"
    "regexp"
    "testing"
)

const (
    myRoomID = "!room:example.com"
    mySender = "@user:example.com"
)

func makeTestEvent(msgtype, body string) *matrix.Event {
    return &matrix.Event{
        Sender: mySender,
        Type:   "m.room.message",
        RoomID: myRoomID,
        Content: map[string]interface{}{
            "body":    body,
            "msgtype": msgtype,
        },
    }
}

type testResponse struct {
    RoomID    string
    Sender    string
    Arguments []string
}

func makeTestResponse(roomID, sender string, arguments []string) interface{} {
    return testResponse{roomID, sender, arguments}
}

type testExpansion struct {
    RoomID       string
    MatchingText string
}

func makeTestExpansion(roomID, matchingText string) interface{} {
    return testExpansion{roomID, matchingText}
}

func makeTestPlugin(paths [][]string, regexps []*regexp.Regexp) Plugin {
    var commands []Command
    for _, path := range paths {
        commands = append(commands, Command{
            Path: path,
            Command: func(roomID, sender string, arguments []string) (interface{}, error) {
                return makeTestResponse(roomID, sender, arguments), nil
            },
        })
    }
    var expansions []Expansion
    for _, re := range regexps {
        expansions = append(expansions, Expansion{
            Regexp: re,
            Expand: makeTestExpansion,
        })
    }

    return Plugin{Commands: commands, Expansions: expansions}
}

func TestRunCommands(t *testing.T) {
    plugins := []Plugin{makeTestPlugin([][]string{
        []string{"test", "command"},
    }, nil)}
    event := makeTestEvent("m.text", `!test command arg1 "arg 2" 'arg 3'`)
    got := runCommands(plugins, event)
    want := []interface{}{makeTestResponse(myRoomID, mySender, []string{
        "arg1", "arg 2", "arg 3",
    })}
    if !reflect.DeepEqual(got, want) {
        t.Errorf("runCommands(%q, %q) == %q, want %q", plugins, event, got, want)
    }
}

func TestRunCommandsBestMatch(t *testing.T) {
    plugins := []Plugin{makeTestPlugin([][]string{
        []string{"test", "command"},
        []string{"test", "command", "more", "specific"},
    }, nil)}
    event := makeTestEvent("m.text", "!test command more specific arg1")
    got := runCommands(plugins, event)
    want := []interface{}{makeTestResponse(myRoomID, mySender, []string{
        "arg1",
    })}
    if !reflect.DeepEqual(got, want) {
        t.Errorf("runCommands(%q, %q) == %q, want %q", plugins, event, got, want)
    }
}

func TestRunCommandsMultiplePlugins(t *testing.T) {
    plugins := []Plugin{
        makeTestPlugin([][]string{[]string{"test", "command", "first"}}, nil),
        makeTestPlugin([][]string{[]string{"test", "command"}}, nil),
    }
    event := makeTestEvent("m.text", "!test command first arg1")
    got := runCommands(plugins, event)
    want := []interface{}{
        makeTestResponse(myRoomID, mySender, []string{"arg1"}),
        makeTestResponse(myRoomID, mySender, []string{"first", "arg1"}),
    }
    if !reflect.DeepEqual(got, want) {
        t.Errorf("runCommands(%q, %q) == %q, want %q", plugins, event, got, want)
    }
}

func TestRunCommandsInvalidShell(t *testing.T) {
    plugins := []Plugin{
        makeTestPlugin([][]string{[]string{"test", "command"}}, nil),
    }
    event := makeTestEvent("m.text", `!test command 'mismatched quotes"`)
    got := runCommands(plugins, event)
    want := []interface{}{
        makeTestResponse(myRoomID, mySender, []string{"'mismatched", `quotes"`}),
    }
    if !reflect.DeepEqual(got, want) {
        t.Errorf("runCommands(%q, %q) == %q, want %q", plugins, event, got, want)
    }
}

func TestExpansion(t *testing.T) {
    plugins := []Plugin{
        makeTestPlugin(nil, []*regexp.Regexp{
            regexp.MustCompile("a[^ ]*"),
            regexp.MustCompile("b.."),
        }),
    }
    event := makeTestEvent("m.text", "test banana for scale")
    got := runCommands(plugins, event)
    want := []interface{}{
        makeTestExpansion(myRoomID, "anana"),
        makeTestExpansion(myRoomID, "ale"),
        makeTestExpansion(myRoomID, "ban"),
    }
    if !reflect.DeepEqual(got, want) {
        t.Errorf("runCommands(%q, %q) == %q, want %q", plugins, event, got, want)
    }
}

func TestExpansionDuplicateMatches(t *testing.T) {
    plugins := []Plugin{
        makeTestPlugin(nil, []*regexp.Regexp{
            regexp.MustCompile("badger"),
        }),
    }
    event := makeTestEvent("m.text", "badger badger badger")
    got := runCommands(plugins, event)
    want := []interface{}{
        makeTestExpansion(myRoomID, "badger"),
    }
    if !reflect.DeepEqual(got, want) {
        t.Errorf("runCommands(%q, %q) == %q, want %q", plugins, event, got, want)
    }
}
@@ -0,0 +1,105 @@
// Package server contains building blocks for REST APIs.
package server

import (
    "encoding/json"
    log "github.com/Sirupsen/logrus"
    "github.com/matrix-org/go-neb/errors"
    "net/http"
)

// JSONRequestHandler represents an interface that must be satisfied in order to respond to incoming
// HTTP requests with JSON. The interface returned will be marshalled into JSON to be sent to the client,
// unless the interface is []byte in which case the bytes are sent to the client unchanged.
// If an error is returned, a JSON error response will also be returned, unless the error code
// is a 302 REDIRECT in which case a redirect is sent based on the Message field.
type JSONRequestHandler interface {
    OnIncomingRequest(req *http.Request) (interface{}, *errors.HTTPError)
}

// JSONError represents a JSON API error response
type JSONError struct {
    Message string `json:"message"`
}

// WithCORSOptions intercepts all OPTIONS requests and responds with CORS headers. The request handler
// is not invoked when this happens.
func WithCORSOptions(handler http.HandlerFunc) http.HandlerFunc {
    return func(w http.ResponseWriter, req *http.Request) {
        if req.Method == "OPTIONS" {
            SetCORSHeaders(w)
            return
        }
        handler(w, req)
    }
}

// MakeJSONAPI creates an HTTP handler which always responds to incoming requests with JSON responses.
func MakeJSONAPI(handler JSONRequestHandler) http.HandlerFunc {
    return func(w http.ResponseWriter, req *http.Request) {
        log.WithFields(log.Fields{
            "method": req.Method,
            "url":    req.URL,
        }).Print("Received request")
        res, httpErr := handler.OnIncomingRequest(req)

        // Set common headers returned regardless of the outcome of the request
        w.Header().Set("Content-Type", "application/json")
        SetCORSHeaders(w)

        if httpErr != nil {
            jsonErrorResponse(w, req, httpErr)
            return
        }

        // If they've returned bytes as the response, then just return them rather than marshalling as JSON.
        // This gives handlers an escape hatch if they want to return cached bytes.
        var resBytes []byte
        resBytes, ok := res.([]byte)
        if !ok {
            r, err := json.Marshal(res)
            if err != nil {
                jsonErrorResponse(w, req, &errors.HTTPError{nil, "Failed to serialise response as JSON", 500})
                return
            }
            resBytes = r
        }
        w.Write(resBytes)
    }
}

func jsonErrorResponse(w http.ResponseWriter, req *http.Request, httpErr *errors.HTTPError) {
    if httpErr.Code == 302 {
        log.WithField("err", httpErr.Error()).Print("Redirecting")
        http.Redirect(w, req, httpErr.Message, 302)
        return
    }

    log.WithField("err", httpErr.Error()).Print("Request failed")
    log.WithFields(log.Fields{
        "url":     req.URL,
        "code":    httpErr.Code,
        "message": httpErr.Message,
    }).Print("Responding with error")

    w.WriteHeader(httpErr.Code) // Set response code

    r, err := json.Marshal(&JSONError{
        Message: httpErr.Message,
    })
    if err != nil {
        // We should never fail to marshal the JSON error response, but in this event just skip
        // marshalling altogether
        log.Warn("Failed to marshal error response")
        w.Write([]byte(`{}`))
        return
    }
    w.Write(r)
}

// SetCORSHeaders sets unrestricted origin Access-Control headers on the response writer
func SetCORSHeaders(w http.ResponseWriter) {
    w.Header().Set("Access-Control-Allow-Origin", "*")
    w.Header().Set("Access-Control-Allow-Methods", "GET, POST, PUT, DELETE, OPTIONS")
    w.Header().Set("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept")
}
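As a usage sketch, the handler below implements JSONRequestHandler and is mounted with MakeJSONAPI and WithCORSOptions. The pingHandler type, pingResponse struct and the /ping route are invented for illustration; the errors.HTTPError literal follows the positional form used elsewhere in this package.

```go
package example

import (
    "net/http"

    "github.com/matrix-org/go-neb/errors"
    "github.com/matrix-org/go-neb/server"
)

// pingResponse is marshalled to JSON by MakeJSONAPI because it is not a []byte.
type pingResponse struct {
    Pong bool `json:"pong"`
}

type pingHandler struct{}

// OnIncomingRequest satisfies server.JSONRequestHandler.
func (h *pingHandler) OnIncomingRequest(req *http.Request) (interface{}, *errors.HTTPError) {
    if req.Method != "GET" {
        return nil, &errors.HTTPError{nil, "Unsupported Method", 405}
    }
    return &pingResponse{Pong: true}, nil
}

func mount() {
    // OPTIONS requests only receive CORS headers; other methods reach the handler.
    http.HandleFunc("/ping", server.WithCORSOptions(server.MakeJSONAPI(&pingHandler{})))
}
```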
@ -0,0 +1,37 @@ |
|||||
|
package services |
||||
|
|
||||
|
import ( |
||||
|
"github.com/matrix-org/go-neb/database" |
||||
|
"github.com/matrix-org/go-neb/matrix" |
||||
|
"github.com/matrix-org/go-neb/plugin" |
||||
|
"strings" |
||||
|
) |
||||
|
|
||||
|
type echoService struct { |
||||
|
id string |
||||
|
UserID string |
||||
|
Rooms []string |
||||
|
} |
||||
|
|
||||
|
func (e *echoService) ServiceUserID() string { return e.UserID } |
||||
|
func (e *echoService) ServiceID() string { return e.id } |
||||
|
func (e *echoService) ServiceType() string { return "echo" } |
||||
|
func (e *echoService) RoomIDs() []string { return e.Rooms } |
||||
|
func (e *echoService) Plugin(roomID string) plugin.Plugin { |
||||
|
return plugin.Plugin{ |
||||
|
Commands: []plugin.Command{ |
||||
|
plugin.Command{ |
||||
|
Path: []string{"echo"}, |
||||
|
Command: func(roomID, userID string, args []string) (interface{}, error) { |
||||
|
return &matrix.TextMessage{"m.notice", strings.Join(args, " ")}, nil |
||||
|
}, |
||||
|
}, |
||||
|
}, |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func init() { |
||||
|
database.RegisterService(func(serviceID string) database.Service { |
||||
|
return &echoService{id: serviceID} |
||||
|
}) |
||||
|
} |
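In practice, sending `!echo hello world` in one of this service's rooms is parsed by the plugin package into the path ["echo"] and the arguments ["hello", "world"], and the bot replies with an m.notice whose body is "hello world". Because runCommands ignores m.notice messages, the bot's own reply cannot re-trigger the command.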
@@ -0,0 +1,35 @@
{
    "version": 0,
    "dependencies": [
        {
            "importpath": "github.com/Sirupsen/logrus",
            "repository": "https://github.com/Sirupsen/logrus",
            "revision": "a283a10442df8dc09befd873fab202bf8a253d6a",
            "branch": "master"
        },
        {
            "importpath": "github.com/mattn/go-shellwords",
            "repository": "https://github.com/mattn/go-shellwords",
            "revision": "525bedee691b5a8df547cb5cf9f86b7fb1883e24",
            "branch": "master"
        },
        {
            "importpath": "github.com/mattn/go-sqlite3",
            "repository": "https://github.com/mattn/go-sqlite3",
            "revision": "e118d4451349065b8e7ce0f0af32e033995363f8",
            "branch": "master"
        },
        {
            "importpath": "gopkg.in/airbrake/gobrake.v2",
            "repository": "https://gopkg.in/airbrake/gobrake.v2",
            "revision": "31c8ff1fb8b79a6947e6565e9a6df535f98a6b94",
            "branch": "master"
        },
        {
            "importpath": "gopkg.in/gemnasium/logrus-airbrake-hook.v2",
            "repository": "https://gopkg.in/gemnasium/logrus-airbrake-hook.v2",
            "revision": "31e6fd4bd5a98d8ee7673d24bc54ec73c31810dd",
            "branch": "master"
        }
    ]
}
@@ -0,0 +1,66 @@
# 0.10.0

* feature: Add a test hook (#180)
* feature: `ParseLevel` is now case-insensitive (#326)
* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308)
* performance: avoid re-allocations on `WithFields` (#335)

# 0.9.0

* logrus/text_formatter: don't emit empty msg
* logrus/hooks/airbrake: move out of main repository
* logrus/hooks/sentry: move out of main repository
* logrus/hooks/papertrail: move out of main repository
* logrus/hooks/bugsnag: move out of main repository
* logrus/core: run tests with `-race`
* logrus/core: detect TTY based on `stderr`
* logrus/core: support `WithError` on logger
* logrus/core: Solaris support

# 0.8.7

* logrus/core: fix possible race (#216)
* logrus/doc: small typo fixes and doc improvements


# 0.8.6

* hooks/raven: allow passing an initialized client

# 0.8.5

* logrus/core: revert #208

# 0.8.4

* formatter/text: fix data race (#218)

# 0.8.3

* logrus/core: fix entry log level (#208)
* logrus/core: improve performance of text formatter by 40%
* logrus/core: expose `LevelHooks` type
* logrus/core: add support for DragonflyBSD and NetBSD
* formatter/text: print structs more verbosely

# 0.8.2

* logrus: fix more Fatal family functions

# 0.8.1

* logrus: fix not exiting on `Fatalf` and `Fatalln`

# 0.8.0

* logrus: defaults to stderr instead of stdout
* hooks/sentry: add special field for `*http.Request`
* formatter/text: ignore Windows for colors

# 0.7.3

* formatter/\*: allow configuration of timestamp layout

# 0.7.2

* formatter/text: Add configuration option for time format (#158)
@@ -0,0 +1,21 @@
The MIT License (MIT)

Copyright (c) 2014 Simon Eskildsen

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
@ -0,0 +1,402 @@ |
|||||
|
# Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/> [![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus) [![GoDoc](https://godoc.org/github.com/Sirupsen/logrus?status.svg)](https://godoc.org/github.com/Sirupsen/logrus) |
||||
|
|
||||
|
Logrus is a structured logger for Go (golang), completely API compatible with |
||||
|
the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not |
||||
|
yet stable (pre 1.0). Logrus itself is completely stable and has been used in |
||||
|
many large deployments. The core API is unlikely to change much but please |
||||
|
version control your Logrus to make sure you aren't fetching latest `master` on |
||||
|
every build.** |
||||
|
|
||||
|
Nicely color-coded in development (when a TTY is attached, otherwise just |
||||
|
plain text): |
||||
|
|
||||
|
![Colored](http://i.imgur.com/PY7qMwd.png) |
||||
|
|
||||
|
With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash |
||||
|
or Splunk: |
||||
|
|
||||
|
```json |
||||
|
{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the |
||||
|
ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"} |
||||
|
|
||||
|
{"level":"warning","msg":"The group's number increased tremendously!", |
||||
|
"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"} |
||||
|
|
||||
|
{"animal":"walrus","level":"info","msg":"A giant walrus appears!", |
||||
|
"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"} |
||||
|
|
||||
|
{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.", |
||||
|
"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"} |
||||
|
|
||||
|
{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true, |
||||
|
"time":"2014-03-10 19:57:38.562543128 -0400 EDT"} |
||||
|
``` |
||||
|
|
||||
|
With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not |
||||
|
attached, the output is compatible with the |
||||
|
[logfmt](http://godoc.org/github.com/kr/logfmt) format: |
||||
|
|
||||
|
```text |
||||
|
time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8 |
||||
|
time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10 |
||||
|
time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true |
||||
|
time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4 |
||||
|
time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009 |
||||
|
time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true |
||||
|
exit status 1 |
||||
|
``` |
||||
|
|
||||
|
#### Example |
||||
|
|
||||
|
The simplest way to use Logrus is simply the package-level exported logger: |
||||
|
|
||||
|
```go |
||||
|
package main |
||||
|
|
||||
|
import ( |
||||
|
log "github.com/Sirupsen/logrus" |
||||
|
) |
||||
|
|
||||
|
func main() { |
||||
|
log.WithFields(log.Fields{ |
||||
|
"animal": "walrus", |
||||
|
}).Info("A walrus appears") |
||||
|
} |
||||
|
``` |
||||
|
|
||||
|
Note that it's completely api-compatible with the stdlib logger, so you can |
||||
|
replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"` |
||||
|
and you'll now have the flexibility of Logrus. You can customize it all you |
||||
|
want: |
||||
|
|
||||
|
```go |
||||
|
package main |
||||
|
|
||||
|
import ( |
||||
|
"os" |
||||
|
log "github.com/Sirupsen/logrus" |
||||
|
) |
||||
|
|
||||
|
func init() { |
||||
|
// Log as JSON instead of the default ASCII formatter. |
||||
|
log.SetFormatter(&log.JSONFormatter{}) |
||||
|
|
||||
|
// Output to stderr instead of stdout, could also be a file. |
||||
|
log.SetOutput(os.Stderr) |
||||
|
|
||||
|
// Only log the warning severity or above. |
||||
|
log.SetLevel(log.WarnLevel) |
||||
|
} |
||||
|
|
||||
|
func main() { |
||||
|
log.WithFields(log.Fields{ |
||||
|
"animal": "walrus", |
||||
|
"size": 10, |
||||
|
}).Info("A group of walrus emerges from the ocean") |
||||
|
|
||||
|
log.WithFields(log.Fields{ |
||||
|
"omg": true, |
||||
|
"number": 122, |
||||
|
}).Warn("The group's number increased tremendously!") |
||||
|
|
||||
|
log.WithFields(log.Fields{ |
||||
|
"omg": true, |
||||
|
"number": 100, |
||||
|
}).Fatal("The ice breaks!") |
||||
|
|
||||
|
// A common pattern is to re-use fields between logging statements by re-using |
||||
|
// the logrus.Entry returned from WithFields() |
||||
|
contextLogger := log.WithFields(log.Fields{ |
||||
|
"common": "this is a common field", |
||||
|
"other": "I also should be logged always", |
||||
|
}) |
||||
|
|
||||
|
contextLogger.Info("I'll be logged with common and other field") |
||||
|
contextLogger.Info("Me too") |
||||
|
} |
||||
|
``` |
||||
|
|
||||
|
For more advanced usage such as logging to multiple locations from the same |
||||
|
application, you can also create an instance of the `logrus` Logger: |
||||
|
|
||||
|
```go |
||||
|
package main |
||||
|
|
||||
|
import ( |
||||
|
"github.com/Sirupsen/logrus" |
||||
|
) |
||||
|
|
||||
|
// Create a new instance of the logger. You can have any number of instances. |
||||
|
var log = logrus.New() |
||||
|
|
||||
|
func main() { |
||||
|
// The API for setting attributes is a little different than the package level |
||||
|
// exported logger. See Godoc. |
||||
|
log.Out = os.Stderr |
||||
|
|
||||
|
log.WithFields(logrus.Fields{ |
||||
|
"animal": "walrus", |
||||
|
"size": 10, |
||||
|
}).Info("A group of walrus emerges from the ocean") |
||||
|
} |
||||
|
``` |
||||
|
|
||||
|
#### Fields |
||||
|
|
||||
|
Logrus encourages careful, structured logging through logging fields instead of
||||
|
long, unparseable error messages. For example, instead of: `log.Fatalf("Failed |
||||
|
to send event %s to topic %s with key %d")`, you should log the much more |
||||
|
discoverable: |
||||
|
|
||||
|
```go |
||||
|
log.WithFields(log.Fields{ |
||||
|
"event": event, |
||||
|
"topic": topic, |
||||
|
"key": key, |
||||
|
}).Fatal("Failed to send event") |
||||
|
``` |
||||
|
|
||||
|
We've found this API forces you to think about logging in a way that produces |
||||
|
much more useful logging messages. We've been in countless situations where just |
||||
|
a single added field to a log statement that was already there would've saved us |
||||
|
hours. The `WithFields` call is optional. |
||||
|
|
||||
|
In general, with Logrus using any of the `printf`-family functions should be |
||||
|
seen as a hint you should add a field, however, you can still use the |
||||
|
`printf`-family functions with Logrus. |
||||
|
|
||||
|
#### Hooks |
||||
|
|
||||
|
You can add hooks for logging levels. For example to send errors to an exception |
||||
|
tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to |
||||
|
multiple places simultaneously, e.g. syslog. |
||||
|
|
||||
|
Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in |
||||
|
`init`: |
||||
|
|
||||
|
```go |
||||
|
import ( |
||||
|
log "github.com/Sirupsen/logrus" |
||||
|
"gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "aibrake" |
||||
|
logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog" |
||||
|
"log/syslog" |
||||
|
) |
||||
|
|
||||
|
func init() { |
||||
|
|
||||
|
// Use the Airbrake hook to report errors that have Error severity or above to |
||||
|
// an exception tracker. You can create custom hooks, see the Hooks section. |
||||
|
log.AddHook(airbrake.NewHook(123, "xyz", "production")) |
||||
|
|
||||
|
hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") |
||||
|
if err != nil { |
||||
|
log.Error("Unable to connect to local syslog daemon") |
||||
|
} else { |
||||
|
log.AddHook(hook) |
||||
|
} |
||||
|
} |
||||
|
``` |
||||
|
Note: Syslog hook also supports connecting to local syslog (e.g. "/dev/log", "/var/run/syslog" or "/var/run/log"). For details, please check the [syslog hook README](hooks/syslog/README.md).
||||
|
|
||||
|
| Hook | Description | |
||||
|
| ----- | ----------- | |
||||
|
| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. | |
||||
|
| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. | |
||||
|
| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. | |
||||
|
| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. | |
||||
|
| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. | |
||||
|
| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. | |
||||
|
| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. | |
||||
|
| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) | |
||||
|
| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. | |
||||
|
| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` | |
||||
|
| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) | |
||||
|
| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) | |
||||
|
| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem | |
||||
|
| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger | |
||||
|
| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail | |
||||
|
| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar | |
||||
|
| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd | |
||||
|
| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb | |
||||
|
| [Influxus] (http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB] (http://influxdata.com/) | |
||||
|
| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb | |
||||
|
| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit | |
||||
|
| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic | |
||||
|
| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to a ELK stack (through Redis) | |
||||
|
| [Amqp-Hook](https://github.com/vladoatanasov/logrus_amqp) | Hook for logging to Amqp broker (Like RabbitMQ) | |
||||
|
| [KafkaLogrus](https://github.com/goibibo/KafkaLogrus) | Hook for logging to kafka | |
||||
|
| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) | |
||||
|
| [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch| |
||||
|
| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/)| |
||||
|
| [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) | |
||||
|
|
||||
|
#### Level logging |
||||
|
|
||||
|
Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic. |
||||
|
|
||||
|
```go |
||||
|
log.Debug("Useful debugging information.") |
||||
|
log.Info("Something noteworthy happened!") |
||||
|
log.Warn("You should probably take a look at this.") |
||||
|
log.Error("Something failed but I'm not quitting.") |
||||
|
// Calls os.Exit(1) after logging |
||||
|
log.Fatal("Bye.") |
||||
|
// Calls panic() after logging |
||||
|
log.Panic("I'm bailing.") |
||||
|
``` |
||||
|
|
||||
|
You can set the logging level on a `Logger`, then it will only log entries with |
||||
|
that severity or anything above it: |
||||
|
|
||||
|
```go |
||||
|
// Will log anything that is info or above (warn, error, fatal, panic). Default. |
||||
|
log.SetLevel(log.InfoLevel) |
||||
|
``` |
||||
|
|
||||
|
It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose |
||||
|
environment if your application has that. |
||||
|
|
||||
|
#### Entries |
||||
|
|
||||
|
Besides the fields added with `WithField` or `WithFields` some fields are |
||||
|
automatically added to all logging events: |
||||
|
|
||||
|
1. `time`. The timestamp when the entry was created. |
||||
|
2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after |
||||
|
the `AddFields` call. E.g. `Failed to send event.` |
||||
|
3. `level`. The logging level. E.g. `info`. |
||||
|
|
||||
|
#### Environments |
||||
|
|
||||
|
Logrus has no notion of environment. |
||||
|
|
||||
|
If you wish for hooks and formatters to only be used in specific environments, |
||||
|
you should handle that yourself. For example, if your application has a global |
||||
|
variable `Environment`, which is a string representation of the environment you |
||||
|
could do: |
||||
|
|
||||
|
```go |
||||
|
import ( |
||||
|
log "github.com/Sirupsen/logrus" |
||||
|
) |
||||
|
|
||||
|
init() { |
||||
|
// do something here to set environment depending on an environment variable |
||||
|
// or command-line flag |
||||
|
if Environment == "production" { |
||||
|
log.SetFormatter(&log.JSONFormatter{}) |
||||
|
} else { |
||||
|
// The TextFormatter is default, you don't actually have to do this. |
||||
|
log.SetFormatter(&log.TextFormatter{}) |
||||
|
} |
||||
|
} |
||||
|
``` |
||||
|
|
||||
|
This configuration is how `logrus` was intended to be used, but JSON in |
||||
|
production is mostly only useful if you do log aggregation with tools like |
||||
|
Splunk or Logstash. |
||||
|
|
||||
|
#### Formatters |
||||
|
|
||||
|
The built-in logging formatters are: |
||||
|
|
||||
|
* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise |
||||
|
without colors. |
||||
|
* *Note:* to force colored output when there is no TTY, set the `ForceColors` |
||||
|
field to `true`. To force no colored output even if there is a TTY set the |
||||
|
`DisableColors` field to `true` |
||||
|
* `logrus.JSONFormatter`. Logs fields as JSON. |
||||
|
|
||||
|
Third party logging formatters: |
||||
|
|
||||
|
* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events. |
||||
|
* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout. |
||||
|
* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦. |
||||
|
|
||||
|
You can define your formatter by implementing the `Formatter` interface, |
||||
|
requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a |
||||
|
`Fields` type (`map[string]interface{}`) with all your fields as well as the |
||||
|
default ones (see Entries section above): |
||||
|
|
||||
|
```go |
||||
|
type MyJSONFormatter struct { |
||||
|
} |
||||
|
|
||||
|
log.SetFormatter(new(MyJSONFormatter)) |
||||
|
|
||||
|
func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) { |
||||
|
// Note this doesn't include Time, Level and Message which are available on |
||||
|
// the Entry. Consult `godoc` on information about those fields or read the |
||||
|
// source of the official loggers. |
||||
|
serialized, err := json.Marshal(entry.Data) |
||||
|
if err != nil { |
||||
|
return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) |
||||
|
} |
||||
|
return append(serialized, '\n'), nil |
||||
|
} |
||||
|
``` |
||||
|
|
||||
|
#### Logger as an `io.Writer` |
||||
|
|
||||
|
Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it. |
||||
|
|
||||
|
```go |
||||
|
w := logger.Writer() |
||||
|
defer w.Close() |
||||
|
|
||||
|
srv := http.Server{ |
||||
|
// create a stdlib log.Logger that writes to |
||||
|
// logrus.Logger. |
||||
|
ErrorLog: log.New(w, "", 0), |
||||
|
} |
||||
|
``` |
||||
|
|
||||
|
Each line written to that writer will be printed the usual way, using formatters |
||||
|
and hooks. The level for those entries is `info`. |
||||
|
|
||||
|
#### Rotation |
||||
|
|
||||
|
Log rotation is not provided with Logrus. Log rotation should be done by an |
||||
|
external program (like `logrotate(8)`) that can compress and delete old log |
||||
|
entries. It should not be a feature of the application-level logger. |
||||
|
|
||||
|
#### Tools |
||||
|
|
||||
|
| Tool | Description | |
||||
|
| ---- | ----------- | |
||||
|
|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus Mate is a tool for Logrus that manages loggers: you can set a logger's level, hooks and formatter from a config file, so loggers are generated with a different config for each environment.|
||||
|
|
||||
|
#### Testing |
||||
|
|
||||
|
Logrus has a built in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides: |
||||
|
|
||||
|
* decorators for existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just add the `test` hook |
||||
|
* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any): |
||||
|
|
||||
|
```go |
||||
|
logger, hook := NewNullLogger() |
||||
|
logger.Error("Hello error") |
||||
|
|
||||
|
assert.Equal(1, len(hook.Entries)) |
||||
|
assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level) |
||||
|
assert.Equal("Hello error", hook.LastEntry().Message) |
||||
|
|
||||
|
hook.Reset() |
||||
|
assert.Nil(hook.LastEntry()) |
||||
|
``` |
||||
|
|
||||
|
#### Fatal handlers |
||||
|
|
||||
|
Logrus can register one or more functions that will be called when any `fatal` |
||||
|
level message is logged. The registered handlers will be executed before |
||||
|
logrus performs a `os.Exit(1)`. This behavior may be helpful if callers need |
||||
|
to gracefully shutdown. Unlike a `panic("Something went wrong...")` call which can be intercepted with a deferred `recover` a call to `os.Exit(1)` can not be intercepted. |
||||
|
|
||||
|
``` |
||||
|
... |
||||
|
handler := func() { |
||||
|
// gracefully shutdown something... |
||||
|
} |
||||
|
logrus.RegisterExitHandler(handler) |
||||
|
... |
||||
|
``` |
@@ -0,0 +1,64 @@
package logrus

// The following code was sourced and modified from the
// https://bitbucket.org/tebeka/atexit package governed by the following license:
//
// Copyright (c) 2012 Miki Tebeka <miki.tebeka@gmail.com>.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal in
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software is furnished to do so,
// subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

import (
    "fmt"
    "os"
)

var handlers = []func(){}

func runHandler(handler func()) {
    defer func() {
        if err := recover(); err != nil {
            fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err)
        }
    }()

    handler()
}

func runHandlers() {
    for _, handler := range handlers {
        runHandler(handler)
    }
}

// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code)
func Exit(code int) {
    runHandlers()
    os.Exit(code)
}

// RegisterExitHandler adds a Logrus Exit handler, call logrus.Exit to invoke
// all handlers. The handlers will also be invoked when any Fatal log entry is
// made.
//
// This method is useful when a caller wishes to use logrus to log a fatal
// message but also needs to gracefully shutdown. An example usecase could be
// closing database connections, or sending an alert that the application is
// closing.
func RegisterExitHandler(handler func()) {
    handlers = append(handlers, handler)
}
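A small usage sketch for the exit handlers, based only on what this file and the Fatal functions provide: the registered function runs after the Fatal entry is written and before os.Exit(1). The clean-up message below is invented for illustration.

```go
package main

import (
    "fmt"

    log "github.com/Sirupsen/logrus"
)

func main() {
    // The handler runs when Fatal is logged, before the process exits.
    log.RegisterExitHandler(func() {
        fmt.Println("closing database connections...")
    })

    log.Fatal("unrecoverable error, shutting down")
}
```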
@ -0,0 +1,74 @@ |
|||||
|
package logrus |
||||
|
|
||||
|
import ( |
||||
|
"io/ioutil" |
||||
|
"os/exec" |
||||
|
"testing" |
||||
|
"time" |
||||
|
) |
||||
|
|
||||
|
func TestRegister(t *testing.T) { |
||||
|
current := len(handlers) |
||||
|
RegisterExitHandler(func() {}) |
||||
|
if len(handlers) != current+1 { |
||||
|
t.Fatalf("can't add handler") |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func TestHandler(t *testing.T) { |
||||
|
gofile := "/tmp/testprog.go" |
||||
|
if err := ioutil.WriteFile(gofile, testprog, 0666); err != nil { |
||||
|
t.Fatalf("can't create go file") |
||||
|
} |
||||
|
|
||||
|
outfile := "/tmp/testprog.out" |
||||
|
arg := time.Now().UTC().String() |
||||
|
err := exec.Command("go", "run", gofile, outfile, arg).Run() |
||||
|
if err == nil { |
||||
|
t.Fatalf("completed normally, should have failed") |
||||
|
} |
||||
|
|
||||
|
data, err := ioutil.ReadFile(outfile) |
||||
|
if err != nil { |
||||
|
t.Fatalf("can't read output file %s", outfile) |
||||
|
} |
||||
|
|
||||
|
if string(data) != arg { |
||||
|
t.Fatalf("bad data") |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
var testprog = []byte(` |
||||
|
// Test program for atexit, gets output file and data as arguments and writes
|
||||
|
// data to output file in atexit handler.
|
||||
|
package main |
||||
|
|
||||
|
import ( |
||||
|
"github.com/Sirupsen/logrus" |
||||
|
"flag" |
||||
|
"fmt" |
||||
|
"io/ioutil" |
||||
|
) |
||||
|
|
||||
|
var outfile = "" |
||||
|
var data = "" |
||||
|
|
||||
|
func handler() { |
||||
|
ioutil.WriteFile(outfile, []byte(data), 0666) |
||||
|
} |
||||
|
|
||||
|
func badHandler() { |
||||
|
n := 0 |
||||
|
fmt.Println(1/n) |
||||
|
} |
||||
|
|
||||
|
func main() { |
||||
|
flag.Parse() |
||||
|
outfile = flag.Arg(0) |
||||
|
data = flag.Arg(1) |
||||
|
|
||||
|
logrus.RegisterExitHandler(handler) |
||||
|
logrus.RegisterExitHandler(badHandler) |
||||
|
logrus.Fatal("Bye bye") |
||||
|
} |
||||
|
`) |
@@ -0,0 +1,26 @@
/*
Package logrus is a structured logger for Go, completely API compatible with the standard library logger.


The simplest way to use Logrus is simply the package-level exported logger:

  package main

  import (
    log "github.com/Sirupsen/logrus"
  )

  func main() {
    log.WithFields(log.Fields{
      "animal": "walrus",
      "number": 1,
      "size":   10,
    }).Info("A walrus appears")
  }

Output:
  time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10

For a full guide visit https://github.com/Sirupsen/logrus
*/
package logrus
@ -0,0 +1,264 @@ |
|||||
|
package logrus |
||||
|
|
||||
|
import ( |
||||
|
"bytes" |
||||
|
"fmt" |
||||
|
"io" |
||||
|
"os" |
||||
|
"time" |
||||
|
) |
||||
|
|
||||
|
// Defines the key when adding errors using WithError.
|
||||
|
var ErrorKey = "error" |
||||
|
|
||||
|
// An entry is the final or intermediate Logrus logging entry. It contains all
|
||||
|
// the fields passed with WithField{,s}. It's finally logged when Debug, Info,
|
||||
|
// Warn, Error, Fatal or Panic is called on it. These objects can be reused and
|
||||
|
// passed around as much as you wish to avoid field duplication.
|
||||
|
type Entry struct { |
||||
|
Logger *Logger |
||||
|
|
||||
|
// Contains all the fields set by the user.
|
||||
|
Data Fields |
||||
|
|
||||
|
// Time at which the log entry was created
|
||||
|
Time time.Time |
||||
|
|
||||
|
// Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic
|
||||
|
Level Level |
||||
|
|
||||
|
// Message passed to Debug, Info, Warn, Error, Fatal or Panic
|
||||
|
Message string |
||||
|
} |
||||
|
|
||||
|
func NewEntry(logger *Logger) *Entry { |
||||
|
return &Entry{ |
||||
|
Logger: logger, |
||||
|
// Default is three fields, give a little extra room
|
||||
|
Data: make(Fields, 5), |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
// Returns a reader for the entry, which is a proxy to the formatter.
|
||||
|
func (entry *Entry) Reader() (*bytes.Buffer, error) { |
||||
|
serialized, err := entry.Logger.Formatter.Format(entry) |
||||
|
return bytes.NewBuffer(serialized), err |
||||
|
} |
||||
|
|
||||
|
// Returns the string representation from the reader and ultimately the
|
||||
|
// formatter.
|
||||
|
func (entry *Entry) String() (string, error) { |
||||
|
reader, err := entry.Reader() |
||||
|
if err != nil { |
||||
|
return "", err |
||||
|
} |
||||
|
|
||||
|
return reader.String(), err |
||||
|
} |
||||
|
|
||||
|
// Add an error as single field (using the key defined in ErrorKey) to the Entry.
|
||||
|
func (entry *Entry) WithError(err error) *Entry { |
||||
|
return entry.WithField(ErrorKey, err) |
||||
|
} |
||||
|
|
||||
|
// Add a single field to the Entry.
|
||||
|
func (entry *Entry) WithField(key string, value interface{}) *Entry { |
||||
|
return entry.WithFields(Fields{key: value}) |
||||
|
} |
||||
|
|
||||
|
// Add a map of fields to the Entry.
|
||||
|
func (entry *Entry) WithFields(fields Fields) *Entry { |
||||
|
data := make(Fields, len(entry.Data)+len(fields)) |
||||
|
for k, v := range entry.Data { |
||||
|
data[k] = v |
||||
|
} |
||||
|
for k, v := range fields { |
||||
|
data[k] = v |
||||
|
} |
||||
|
return &Entry{Logger: entry.Logger, Data: data} |
||||
|
} |
||||
|
|
||||
|
// This function is not declared with a pointer value because otherwise
|
||||
|
// race conditions will occur when using multiple goroutines
|
||||
|
func (entry Entry) log(level Level, msg string) { |
||||
|
entry.Time = time.Now() |
||||
|
entry.Level = level |
||||
|
entry.Message = msg |
||||
|
|
||||
|
if err := entry.Logger.Hooks.Fire(level, &entry); err != nil { |
||||
|
entry.Logger.mu.Lock() |
||||
|
fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) |
||||
|
entry.Logger.mu.Unlock() |
||||
|
} |
||||
|
|
||||
|
reader, err := entry.Reader() |
||||
|
if err != nil { |
||||
|
entry.Logger.mu.Lock() |
||||
|
fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) |
||||
|
entry.Logger.mu.Unlock() |
||||
|
} |
||||
|
|
||||
|
entry.Logger.mu.Lock() |
||||
|
defer entry.Logger.mu.Unlock() |
||||
|
|
||||
|
_, err = io.Copy(entry.Logger.Out, reader) |
||||
|
if err != nil { |
||||
|
fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) |
||||
|
} |
||||
|
|
||||
|
// To avoid Entry#log() returning a value that only would make sense for
|
||||
|
// panic() to use in Entry#Panic(), we avoid the allocation by checking
|
||||
|
// directly here.
|
||||
|
if level <= PanicLevel { |
||||
|
panic(&entry) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func (entry *Entry) Debug(args ...interface{}) { |
||||
|
if entry.Logger.Level >= DebugLevel { |
||||
|
entry.log(DebugLevel, fmt.Sprint(args...)) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func (entry *Entry) Print(args ...interface{}) { |
||||
|
entry.Info(args...) |
||||
|
} |
||||
|
|
||||
|
func (entry *Entry) Info(args ...interface{}) { |
||||
|
if entry.Logger.Level >= InfoLevel { |
||||
|
entry.log(InfoLevel, fmt.Sprint(args...)) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func (entry *Entry) Warn(args ...interface{}) { |
||||
|
if entry.Logger.Level >= WarnLevel { |
||||
|
entry.log(WarnLevel, fmt.Sprint(args...)) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func (entry *Entry) Warning(args ...interface{}) { |
||||
|
entry.Warn(args...) |
||||
|
} |
||||
|
|
||||
|
func (entry *Entry) Error(args ...interface{}) { |
||||
|
if entry.Logger.Level >= ErrorLevel { |
||||
|
entry.log(ErrorLevel, fmt.Sprint(args...)) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func (entry *Entry) Fatal(args ...interface{}) { |
||||
|
if entry.Logger.Level >= FatalLevel { |
||||
|
entry.log(FatalLevel, fmt.Sprint(args...)) |
||||
|
} |
||||
|
Exit(1) |
||||
|
} |
||||
|
|
||||
|
func (entry *Entry) Panic(args ...interface{}) { |
||||
|
if entry.Logger.Level >= PanicLevel { |
||||
|
entry.log(PanicLevel, fmt.Sprint(args...)) |
||||
|
} |
||||
|
panic(fmt.Sprint(args...)) |
||||
|
} |
||||
|
|
||||
|
// Entry Printf family functions
|
||||
|
|
||||
|
func (entry *Entry) Debugf(format string, args ...interface{}) { |
||||
|
if entry.Logger.Level >= DebugLevel { |
||||
|
entry.Debug(fmt.Sprintf(format, args...)) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func (entry *Entry) Infof(format string, args ...interface{}) { |
||||
|
if entry.Logger.Level >= InfoLevel { |
||||
|
entry.Info(fmt.Sprintf(format, args...)) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func (entry *Entry) Printf(format string, args ...interface{}) { |
||||
|
entry.Infof(format, args...) |
||||
|
} |
||||
|
|
||||
|
func (entry *Entry) Warnf(format string, args ...interface{}) { |
||||
|
if entry.Logger.Level >= WarnLevel { |
||||
|
entry.Warn(fmt.Sprintf(format, args...)) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func (entry *Entry) Warningf(format string, args ...interface{}) { |
||||
|
entry.Warnf(format, args...) |
||||
|
} |
||||
|
|
||||
|
func (entry *Entry) Errorf(format string, args ...interface{}) { |
||||
|
if entry.Logger.Level >= ErrorLevel { |
||||
|
entry.Error(fmt.Sprintf(format, args...)) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func (entry *Entry) Fatalf(format string, args ...interface{}) { |
||||
|
if entry.Logger.Level >= FatalLevel { |
||||
|
entry.Fatal(fmt.Sprintf(format, args...)) |
||||
|
} |
||||
|
Exit(1) |
||||
|
} |
||||
|
|
||||
|
func (entry *Entry) Panicf(format string, args ...interface{}) { |
||||
|
if entry.Logger.Level >= PanicLevel { |
||||
|
entry.Panic(fmt.Sprintf(format, args...)) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
// Entry Println family functions
|
||||
|
|
||||
|
func (entry *Entry) Debugln(args ...interface{}) { |
||||
|
if entry.Logger.Level >= DebugLevel { |
||||
|
entry.Debug(entry.sprintlnn(args...)) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func (entry *Entry) Infoln(args ...interface{}) { |
||||
|
if entry.Logger.Level >= InfoLevel { |
||||
|
entry.Info(entry.sprintlnn(args...)) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func (entry *Entry) Println(args ...interface{}) { |
||||
|
entry.Infoln(args...) |
||||
|
} |
||||
|
|
||||
|
func (entry *Entry) Warnln(args ...interface{}) { |
||||
|
if entry.Logger.Level >= WarnLevel { |
||||
|
entry.Warn(entry.sprintlnn(args...)) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func (entry *Entry) Warningln(args ...interface{}) { |
||||
|
entry.Warnln(args...) |
||||
|
} |
||||
|
|
||||
|
func (entry *Entry) Errorln(args ...interface{}) { |
||||
|
if entry.Logger.Level >= ErrorLevel { |
||||
|
entry.Error(entry.sprintlnn(args...)) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func (entry *Entry) Fatalln(args ...interface{}) { |
||||
|
if entry.Logger.Level >= FatalLevel { |
||||
|
entry.Fatal(entry.sprintlnn(args...)) |
||||
|
} |
||||
|
Exit(1) |
||||
|
} |
||||
|
|
||||
|
func (entry *Entry) Panicln(args ...interface{}) { |
||||
|
if entry.Logger.Level >= PanicLevel { |
||||
|
entry.Panic(entry.sprintlnn(args...)) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
// Sprintlnn => Sprint no newline. This is to get the behavior of how
|
||||
|
// fmt.Sprintln where spaces are always added between operands, regardless of
|
||||
|
// their type. Instead of vendoring the Sprintln implementation to spare a
|
||||
|
// string allocation, we do the simplest thing.
|
||||
|
func (entry *Entry) sprintlnn(args ...interface{}) string { |
||||
|
msg := fmt.Sprintln(args...) |
||||
|
return msg[:len(msg)-1] |
||||
|
} |
@ -0,0 +1,77 @@ |
|||||
|
package logrus |
||||
|
|
||||
|
import ( |
||||
|
"bytes" |
||||
|
"fmt" |
||||
|
"testing" |
||||
|
|
||||
|
"github.com/stretchr/testify/assert" |
||||
|
) |
||||
|
|
||||
|
func TestEntryWithError(t *testing.T) { |
||||
|
|
||||
|
assert := assert.New(t) |
||||
|
|
||||
|
defer func() { |
||||
|
ErrorKey = "error" |
||||
|
}() |
||||
|
|
||||
|
err := fmt.Errorf("kaboom at layer %d", 4711) |
||||
|
|
||||
|
assert.Equal(err, WithError(err).Data["error"]) |
||||
|
|
||||
|
logger := New() |
||||
|
logger.Out = &bytes.Buffer{} |
||||
|
entry := NewEntry(logger) |
||||
|
|
||||
|
assert.Equal(err, entry.WithError(err).Data["error"]) |
||||
|
|
||||
|
ErrorKey = "err" |
||||
|
|
||||
|
assert.Equal(err, entry.WithError(err).Data["err"]) |
||||
|
|
||||
|
} |
||||
|
|
||||
|
func TestEntryPanicln(t *testing.T) { |
||||
|
errBoom := fmt.Errorf("boom time") |
||||
|
|
||||
|
defer func() { |
||||
|
p := recover() |
||||
|
assert.NotNil(t, p) |
||||
|
|
||||
|
switch pVal := p.(type) { |
||||
|
case *Entry: |
||||
|
assert.Equal(t, "kaboom", pVal.Message) |
||||
|
assert.Equal(t, errBoom, pVal.Data["err"]) |
||||
|
default: |
||||
|
t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal) |
||||
|
} |
||||
|
}() |
||||
|
|
||||
|
logger := New() |
||||
|
logger.Out = &bytes.Buffer{} |
||||
|
entry := NewEntry(logger) |
||||
|
entry.WithField("err", errBoom).Panicln("kaboom") |
||||
|
} |
||||
|
|
||||
|
func TestEntryPanicf(t *testing.T) { |
||||
|
errBoom := fmt.Errorf("boom again") |
||||
|
|
||||
|
defer func() { |
||||
|
p := recover() |
||||
|
assert.NotNil(t, p) |
||||
|
|
||||
|
switch pVal := p.(type) { |
||||
|
case *Entry: |
||||
|
assert.Equal(t, "kaboom true", pVal.Message) |
||||
|
assert.Equal(t, errBoom, pVal.Data["err"]) |
||||
|
default: |
||||
|
t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal) |
||||
|
} |
||||
|
}() |
||||
|
|
||||
|
logger := New() |
||||
|
logger.Out = &bytes.Buffer{} |
||||
|
entry := NewEntry(logger) |
||||
|
entry.WithField("err", errBoom).Panicf("kaboom %v", true) |
||||
|
} |
@ -0,0 +1,50 @@ |
|||||
|
package main |
||||
|
|
||||
|
import ( |
||||
|
"github.com/Sirupsen/logrus" |
||||
|
) |
||||
|
|
||||
|
var log = logrus.New() |
||||
|
|
||||
|
func init() { |
||||
|
log.Formatter = new(logrus.JSONFormatter) |
||||
|
log.Formatter = new(logrus.TextFormatter) // default
|
||||
|
log.Level = logrus.DebugLevel |
||||
|
} |
||||
|
|
||||
|
func main() { |
||||
|
defer func() { |
||||
|
err := recover() |
||||
|
if err != nil { |
||||
|
log.WithFields(logrus.Fields{ |
||||
|
"omg": true, |
||||
|
"err": err, |
||||
|
"number": 100, |
||||
|
}).Fatal("The ice breaks!") |
||||
|
} |
||||
|
}() |
||||
|
|
||||
|
log.WithFields(logrus.Fields{ |
||||
|
"animal": "walrus", |
||||
|
"number": 8, |
||||
|
}).Debug("Started observing beach") |
||||
|
|
||||
|
log.WithFields(logrus.Fields{ |
||||
|
"animal": "walrus", |
||||
|
"size": 10, |
||||
|
}).Info("A group of walrus emerges from the ocean") |
||||
|
|
||||
|
log.WithFields(logrus.Fields{ |
||||
|
"omg": true, |
||||
|
"number": 122, |
||||
|
}).Warn("The group's number increased tremendously!") |
||||
|
|
||||
|
log.WithFields(logrus.Fields{ |
||||
|
"temperature": -4, |
||||
|
}).Debug("Temperature changes") |
||||
|
|
||||
|
log.WithFields(logrus.Fields{ |
||||
|
"animal": "orca", |
||||
|
"size": 9009, |
||||
|
}).Panic("It's over 9000!") |
||||
|
} |
@ -0,0 +1,30 @@ |
|||||
|
package main |
||||
|
|
||||
|
import ( |
||||
|
"github.com/Sirupsen/logrus" |
||||
|
"gopkg.in/gemnasium/logrus-airbrake-hook.v2" |
||||
|
) |
||||
|
|
||||
|
var log = logrus.New() |
||||
|
|
||||
|
func init() { |
||||
|
log.Formatter = new(logrus.TextFormatter) // default
|
||||
|
log.Hooks.Add(airbrake.NewHook(123, "xyz", "development")) |
||||
|
} |
||||
|
|
||||
|
func main() { |
||||
|
log.WithFields(logrus.Fields{ |
||||
|
"animal": "walrus", |
||||
|
"size": 10, |
||||
|
}).Info("A group of walrus emerges from the ocean") |
||||
|
|
||||
|
log.WithFields(logrus.Fields{ |
||||
|
"omg": true, |
||||
|
"number": 122, |
||||
|
}).Warn("The group's number increased tremendously!") |
||||
|
|
||||
|
log.WithFields(logrus.Fields{ |
||||
|
"omg": true, |
||||
|
"number": 100, |
||||
|
}).Fatal("The ice breaks!") |
||||
|
} |
@ -0,0 +1,193 @@ |
|||||
|
package logrus |
||||
|
|
||||
|
import ( |
||||
|
"io" |
||||
|
) |
||||
|
|
||||
|
var ( |
||||
|
// std is the name of the standard logger in stdlib `log`
|
||||
|
std = New() |
||||
|
) |
||||
|
|
||||
|
func StandardLogger() *Logger { |
||||
|
return std |
||||
|
} |
||||
|
|
||||
|
// SetOutput sets the standard logger output.
|
||||
|
func SetOutput(out io.Writer) { |
||||
|
std.mu.Lock() |
||||
|
defer std.mu.Unlock() |
||||
|
std.Out = out |
||||
|
} |
||||
|
|
||||
|
// SetFormatter sets the standard logger formatter.
|
||||
|
func SetFormatter(formatter Formatter) { |
||||
|
std.mu.Lock() |
||||
|
defer std.mu.Unlock() |
||||
|
std.Formatter = formatter |
||||
|
} |
||||
|
|
||||
|
// SetLevel sets the standard logger level.
|
||||
|
func SetLevel(level Level) { |
||||
|
std.mu.Lock() |
||||
|
defer std.mu.Unlock() |
||||
|
std.Level = level |
||||
|
} |
||||
|
|
||||
|
// GetLevel returns the standard logger level.
|
||||
|
func GetLevel() Level { |
||||
|
std.mu.Lock() |
||||
|
defer std.mu.Unlock() |
||||
|
return std.Level |
||||
|
} |
||||
|
|
||||
|
// AddHook adds a hook to the standard logger hooks.
|
||||
|
func AddHook(hook Hook) { |
||||
|
std.mu.Lock() |
||||
|
defer std.mu.Unlock() |
||||
|
std.Hooks.Add(hook) |
||||
|
} |
||||
|
|
||||
|
// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key.
|
||||
|
func WithError(err error) *Entry { |
||||
|
return std.WithField(ErrorKey, err) |
||||
|
} |
||||
|
|
||||
|
// WithField creates an entry from the standard logger and adds a field to
|
||||
|
// it. If you want multiple fields, use `WithFields`.
|
||||
|
//
|
||||
|
// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
|
||||
|
// or Panic on the Entry it returns.
|
||||
|
func WithField(key string, value interface{}) *Entry { |
||||
|
return std.WithField(key, value) |
||||
|
} |
||||
|
|
||||
|
// WithFields creates an entry from the standard logger and adds multiple
|
||||
|
// fields to it. This is simply a helper for `WithField`, invoking it
|
||||
|
// once for each field.
|
||||
|
//
|
||||
|
// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
|
||||
|
// or Panic on the Entry it returns.
|
||||
|
func WithFields(fields Fields) *Entry { |
||||
|
return std.WithFields(fields) |
||||
|
} |
||||
|
|
||||
|
// Debug logs a message at level Debug on the standard logger.
|
||||
|
func Debug(args ...interface{}) { |
||||
|
std.Debug(args...) |
||||
|
} |
||||
|
|
||||
|
// Print logs a message at level Info on the standard logger.
|
||||
|
func Print(args ...interface{}) { |
||||
|
std.Print(args...) |
||||
|
} |
||||
|
|
||||
|
// Info logs a message at level Info on the standard logger.
|
||||
|
func Info(args ...interface{}) { |
||||
|
std.Info(args...) |
||||
|
} |
||||
|
|
||||
|
// Warn logs a message at level Warn on the standard logger.
|
||||
|
func Warn(args ...interface{}) { |
||||
|
std.Warn(args...) |
||||
|
} |
||||
|
|
||||
|
// Warning logs a message at level Warn on the standard logger.
|
||||
|
func Warning(args ...interface{}) { |
||||
|
std.Warning(args...) |
||||
|
} |
||||
|
|
||||
|
// Error logs a message at level Error on the standard logger.
|
||||
|
func Error(args ...interface{}) { |
||||
|
std.Error(args...) |
||||
|
} |
||||
|
|
||||
|
// Panic logs a message at level Panic on the standard logger.
|
||||
|
func Panic(args ...interface{}) { |
||||
|
std.Panic(args...) |
||||
|
} |
||||
|
|
||||
|
// Fatal logs a message at level Fatal on the standard logger.
|
||||
|
func Fatal(args ...interface{}) { |
||||
|
std.Fatal(args...) |
||||
|
} |
||||
|
|
||||
|
// Debugf logs a message at level Debug on the standard logger.
|
||||
|
func Debugf(format string, args ...interface{}) { |
||||
|
std.Debugf(format, args...) |
||||
|
} |
||||
|
|
||||
|
// Printf logs a message at level Info on the standard logger.
|
||||
|
func Printf(format string, args ...interface{}) { |
||||
|
std.Printf(format, args...) |
||||
|
} |
||||
|
|
||||
|
// Infof logs a message at level Info on the standard logger.
|
||||
|
func Infof(format string, args ...interface{}) { |
||||
|
std.Infof(format, args...) |
||||
|
} |
||||
|
|
||||
|
// Warnf logs a message at level Warn on the standard logger.
|
||||
|
func Warnf(format string, args ...interface{}) { |
||||
|
std.Warnf(format, args...) |
||||
|
} |
||||
|
|
||||
|
// Warningf logs a message at level Warn on the standard logger.
|
||||
|
func Warningf(format string, args ...interface{}) { |
||||
|
std.Warningf(format, args...) |
||||
|
} |
||||
|
|
||||
|
// Errorf logs a message at level Error on the standard logger.
|
||||
|
func Errorf(format string, args ...interface{}) { |
||||
|
std.Errorf(format, args...) |
||||
|
} |
||||
|
|
||||
|
// Panicf logs a message at level Panic on the standard logger.
|
||||
|
func Panicf(format string, args ...interface{}) { |
||||
|
std.Panicf(format, args...) |
||||
|
} |
||||
|
|
||||
|
// Fatalf logs a message at level Fatal on the standard logger.
|
||||
|
func Fatalf(format string, args ...interface{}) { |
||||
|
std.Fatalf(format, args...) |
||||
|
} |
||||
|
|
||||
|
// Debugln logs a message at level Debug on the standard logger.
|
||||
|
func Debugln(args ...interface{}) { |
||||
|
std.Debugln(args...) |
||||
|
} |
||||
|
|
||||
|
// Println logs a message at level Info on the standard logger.
|
||||
|
func Println(args ...interface{}) { |
||||
|
std.Println(args...) |
||||
|
} |
||||
|
|
||||
|
// Infoln logs a message at level Info on the standard logger.
|
||||
|
func Infoln(args ...interface{}) { |
||||
|
std.Infoln(args...) |
||||
|
} |
||||
|
|
||||
|
// Warnln logs a message at level Warn on the standard logger.
|
||||
|
func Warnln(args ...interface{}) { |
||||
|
std.Warnln(args...) |
||||
|
} |
||||
|
|
||||
|
// Warningln logs a message at level Warn on the standard logger.
|
||||
|
func Warningln(args ...interface{}) { |
||||
|
std.Warningln(args...) |
||||
|
} |
||||
|
|
||||
|
// Errorln logs a message at level Error on the standard logger.
|
||||
|
func Errorln(args ...interface{}) { |
||||
|
std.Errorln(args...) |
||||
|
} |
||||
|
|
||||
|
// Panicln logs a message at level Panic on the standard logger.
|
||||
|
func Panicln(args ...interface{}) { |
||||
|
std.Panicln(args...) |
||||
|
} |
||||
|
|
||||
|
// Fatalln logs a message at level Fatal on the standard logger.
|
||||
|
func Fatalln(args ...interface{}) { |
||||
|
std.Fatalln(args...) |
||||
|
} |
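For orientation, the package-level helpers above all delegate to the shared `std` logger, so they can be used without constructing a `Logger` yourself. A minimal usage sketch, assuming only the functions listed above; the field names and messages are illustrative, not part of the vendored code:

```go
package main

import (
	"os"

	"github.com/Sirupsen/logrus"
)

func main() {
	// Configure the shared standard logger once, near program start-up.
	logrus.SetOutput(os.Stderr)
	logrus.SetFormatter(&logrus.JSONFormatter{})
	logrus.SetLevel(logrus.DebugLevel)

	// WithField/WithFields only build an Entry; nothing is written until a
	// level method such as Debug or Error is called on it.
	logrus.WithFields(logrus.Fields{
		"component": "sync", // illustrative field names
		"attempt":   1,
	}).Debug("starting sync loop")

	logrus.WithError(os.ErrNotExist).Error("config file missing")
}
```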
@ -0,0 +1,45 @@ |
|||||
|
package logrus |
||||
|
|
||||
|
import "time" |
||||
|
|
||||
|
const DefaultTimestampFormat = time.RFC3339 |
||||
|
|
||||
|
// The Formatter interface is used to implement a custom Formatter. It takes an
|
||||
|
// `Entry`. It exposes all the fields, including the default ones:
|
||||
|
//
|
||||
|
// * `entry.Data["msg"]`. The message passed from Info, Warn, Error ..
|
||||
|
// * `entry.Data["time"]`. The timestamp.
|
||||
|
// * `entry.Data["level"]`. The level the entry was logged at.
|
||||
|
//
|
||||
|
// Any additional fields added with `WithField` or `WithFields` are also in
|
||||
|
// `entry.Data`. Format is expected to return an array of bytes which are then
|
||||
|
// logged to `logger.Out`.
|
||||
|
type Formatter interface { |
||||
|
Format(*Entry) ([]byte, error) |
||||
|
} |
||||
|
|
||||
|
// This is to not silently overwrite `time`, `msg` and `level` fields when
|
||||
|
// dumping the entry. If this code wasn't there, then doing:
|
||||
|
//
|
||||
|
// logrus.WithField("level", 1).Info("hello")
|
||||
|
//
|
||||
|
// would just silently drop the user-provided level. Instead, with this code
|
||||
|
// it'll be logged as:
|
||||
|
//
|
||||
|
// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
|
||||
|
//
|
||||
|
// It's not exported because it's still using Data in an opinionated way. It's to
|
||||
|
// avoid code duplication between the two default formatters.
|
||||
|
func prefixFieldClashes(data Fields) { |
||||
|
if t, ok := data["time"]; ok { |
||||
|
data["fields.time"] = t |
||||
|
} |
||||
|
|
||||
|
if m, ok := data["msg"]; ok { |
||||
|
data["fields.msg"] = m |
||||
|
} |
||||
|
|
||||
|
if l, ok := data["level"]; ok { |
||||
|
data["fields.level"] = l |
||||
|
} |
||||
|
} |
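Any type with a `Format(*Entry) ([]byte, error)` method satisfies the `Formatter` interface above. A minimal sketch of a custom formatter, assuming only the `Entry` fields that appear elsewhere in this commit (`Level`, `Message`, `Data`); the `plainFormatter` name is hypothetical:

```go
package main

import (
	"fmt"

	"github.com/Sirupsen/logrus"
)

// plainFormatter renders "LEVEL: message key=value ..." and drops timestamps.
type plainFormatter struct{}

func (f *plainFormatter) Format(entry *logrus.Entry) ([]byte, error) {
	line := fmt.Sprintf("%s: %s", entry.Level.String(), entry.Message)
	for k, v := range entry.Data {
		line += fmt.Sprintf(" %s=%v", k, v)
	}
	return []byte(line + "\n"), nil
}

func main() {
	log := logrus.New()
	log.Formatter = &plainFormatter{}
	log.WithField("animal", "walrus").Info("hello")
	// Prints something like "info: hello animal=walrus" (map order not guaranteed).
}
```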
@ -0,0 +1,98 @@ |
|||||
|
package logrus |
||||
|
|
||||
|
import ( |
||||
|
"fmt" |
||||
|
"testing" |
||||
|
"time" |
||||
|
) |
||||
|
|
||||
|
// smallFields is a small size data set for benchmarking
|
||||
|
var smallFields = Fields{ |
||||
|
"foo": "bar", |
||||
|
"baz": "qux", |
||||
|
"one": "two", |
||||
|
"three": "four", |
||||
|
} |
||||
|
|
||||
|
// largeFields is a large size data set for benchmarking
|
||||
|
var largeFields = Fields{ |
||||
|
"foo": "bar", |
||||
|
"baz": "qux", |
||||
|
"one": "two", |
||||
|
"three": "four", |
||||
|
"five": "six", |
||||
|
"seven": "eight", |
||||
|
"nine": "ten", |
||||
|
"eleven": "twelve", |
||||
|
"thirteen": "fourteen", |
||||
|
"fifteen": "sixteen", |
||||
|
"seventeen": "eighteen", |
||||
|
"nineteen": "twenty", |
||||
|
"a": "b", |
||||
|
"c": "d", |
||||
|
"e": "f", |
||||
|
"g": "h", |
||||
|
"i": "j", |
||||
|
"k": "l", |
||||
|
"m": "n", |
||||
|
"o": "p", |
||||
|
"q": "r", |
||||
|
"s": "t", |
||||
|
"u": "v", |
||||
|
"w": "x", |
||||
|
"y": "z", |
||||
|
"this": "will", |
||||
|
"make": "thirty", |
||||
|
"entries": "yeah", |
||||
|
} |
||||
|
|
||||
|
var errorFields = Fields{ |
||||
|
"foo": fmt.Errorf("bar"), |
||||
|
"baz": fmt.Errorf("qux"), |
||||
|
} |
||||
|
|
||||
|
func BenchmarkErrorTextFormatter(b *testing.B) { |
||||
|
doBenchmark(b, &TextFormatter{DisableColors: true}, errorFields) |
||||
|
} |
||||
|
|
||||
|
func BenchmarkSmallTextFormatter(b *testing.B) { |
||||
|
doBenchmark(b, &TextFormatter{DisableColors: true}, smallFields) |
||||
|
} |
||||
|
|
||||
|
func BenchmarkLargeTextFormatter(b *testing.B) { |
||||
|
doBenchmark(b, &TextFormatter{DisableColors: true}, largeFields) |
||||
|
} |
||||
|
|
||||
|
func BenchmarkSmallColoredTextFormatter(b *testing.B) { |
||||
|
doBenchmark(b, &TextFormatter{ForceColors: true}, smallFields) |
||||
|
} |
||||
|
|
||||
|
func BenchmarkLargeColoredTextFormatter(b *testing.B) { |
||||
|
doBenchmark(b, &TextFormatter{ForceColors: true}, largeFields) |
||||
|
} |
||||
|
|
||||
|
func BenchmarkSmallJSONFormatter(b *testing.B) { |
||||
|
doBenchmark(b, &JSONFormatter{}, smallFields) |
||||
|
} |
||||
|
|
||||
|
func BenchmarkLargeJSONFormatter(b *testing.B) { |
||||
|
doBenchmark(b, &JSONFormatter{}, largeFields) |
||||
|
} |
||||
|
|
||||
|
func doBenchmark(b *testing.B, formatter Formatter, fields Fields) { |
||||
|
entry := &Entry{ |
||||
|
Time: time.Time{}, |
||||
|
Level: InfoLevel, |
||||
|
Message: "message", |
||||
|
Data: fields, |
||||
|
} |
||||
|
var d []byte |
||||
|
var err error |
||||
|
for i := 0; i < b.N; i++ { |
||||
|
d, err = formatter.Format(entry) |
||||
|
if err != nil { |
||||
|
b.Fatal(err) |
||||
|
} |
||||
|
b.SetBytes(int64(len(d))) |
||||
|
} |
||||
|
} |
@ -0,0 +1,122 @@ |
|||||
|
package logrus |
||||
|
|
||||
|
import ( |
||||
|
"testing" |
||||
|
|
||||
|
"github.com/stretchr/testify/assert" |
||||
|
) |
||||
|
|
||||
|
type TestHook struct { |
||||
|
Fired bool |
||||
|
} |
||||
|
|
||||
|
func (hook *TestHook) Fire(entry *Entry) error { |
||||
|
hook.Fired = true |
||||
|
return nil |
||||
|
} |
||||
|
|
||||
|
func (hook *TestHook) Levels() []Level { |
||||
|
return []Level{ |
||||
|
DebugLevel, |
||||
|
InfoLevel, |
||||
|
WarnLevel, |
||||
|
ErrorLevel, |
||||
|
FatalLevel, |
||||
|
PanicLevel, |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func TestHookFires(t *testing.T) { |
||||
|
hook := new(TestHook) |
||||
|
|
||||
|
LogAndAssertJSON(t, func(log *Logger) { |
||||
|
log.Hooks.Add(hook) |
||||
|
assert.Equal(t, hook.Fired, false) |
||||
|
|
||||
|
log.Print("test") |
||||
|
}, func(fields Fields) { |
||||
|
assert.Equal(t, hook.Fired, true) |
||||
|
}) |
||||
|
} |
||||
|
|
||||
|
type ModifyHook struct { |
||||
|
} |
||||
|
|
||||
|
func (hook *ModifyHook) Fire(entry *Entry) error { |
||||
|
entry.Data["wow"] = "whale" |
||||
|
return nil |
||||
|
} |
||||
|
|
||||
|
func (hook *ModifyHook) Levels() []Level { |
||||
|
return []Level{ |
||||
|
DebugLevel, |
||||
|
InfoLevel, |
||||
|
WarnLevel, |
||||
|
ErrorLevel, |
||||
|
FatalLevel, |
||||
|
PanicLevel, |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func TestHookCanModifyEntry(t *testing.T) { |
||||
|
hook := new(ModifyHook) |
||||
|
|
||||
|
LogAndAssertJSON(t, func(log *Logger) { |
||||
|
log.Hooks.Add(hook) |
||||
|
log.WithField("wow", "elephant").Print("test") |
||||
|
}, func(fields Fields) { |
||||
|
assert.Equal(t, fields["wow"], "whale") |
||||
|
}) |
||||
|
} |
||||
|
|
||||
|
func TestCanFireMultipleHooks(t *testing.T) { |
||||
|
hook1 := new(ModifyHook) |
||||
|
hook2 := new(TestHook) |
||||
|
|
||||
|
LogAndAssertJSON(t, func(log *Logger) { |
||||
|
log.Hooks.Add(hook1) |
||||
|
log.Hooks.Add(hook2) |
||||
|
|
||||
|
log.WithField("wow", "elephant").Print("test") |
||||
|
}, func(fields Fields) { |
||||
|
assert.Equal(t, fields["wow"], "whale") |
||||
|
assert.Equal(t, hook2.Fired, true) |
||||
|
}) |
||||
|
} |
||||
|
|
||||
|
type ErrorHook struct { |
||||
|
Fired bool |
||||
|
} |
||||
|
|
||||
|
func (hook *ErrorHook) Fire(entry *Entry) error { |
||||
|
hook.Fired = true |
||||
|
return nil |
||||
|
} |
||||
|
|
||||
|
func (hook *ErrorHook) Levels() []Level { |
||||
|
return []Level{ |
||||
|
ErrorLevel, |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func TestErrorHookShouldntFireOnInfo(t *testing.T) { |
||||
|
hook := new(ErrorHook) |
||||
|
|
||||
|
LogAndAssertJSON(t, func(log *Logger) { |
||||
|
log.Hooks.Add(hook) |
||||
|
log.Info("test") |
||||
|
}, func(fields Fields) { |
||||
|
assert.Equal(t, hook.Fired, false) |
||||
|
}) |
||||
|
} |
||||
|
|
||||
|
func TestErrorHookShouldFireOnError(t *testing.T) { |
||||
|
hook := new(ErrorHook) |
||||
|
|
||||
|
LogAndAssertJSON(t, func(log *Logger) { |
||||
|
log.Hooks.Add(hook) |
||||
|
log.Error("test") |
||||
|
}, func(fields Fields) { |
||||
|
assert.Equal(t, hook.Fired, true) |
||||
|
}) |
||||
|
} |
@ -0,0 +1,34 @@ |
|||||
|
package logrus |
||||
|
|
||||
|
// A hook to be fired when logging on the logging levels returned from
|
||||
|
// `Levels()` on your implementation of the interface. Note that this is not
|
||||
|
// fired in a goroutine or a channel with workers; you should handle such
|
||||
|
// functionality yourself if your call is non-blocking and you don't wish for
|
||||
|
// the logging calls for levels returned from `Levels()` to block.
|
||||
|
type Hook interface { |
||||
|
Levels() []Level |
||||
|
Fire(*Entry) error |
||||
|
} |
||||
|
|
||||
|
// Internal type for storing the hooks on a logger instance.
|
||||
|
type LevelHooks map[Level][]Hook |
||||
|
|
||||
|
// Add a hook to an instance of logger. This is called with
|
||||
|
// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface.
|
||||
|
func (hooks LevelHooks) Add(hook Hook) { |
||||
|
for _, level := range hook.Levels() { |
||||
|
hooks[level] = append(hooks[level], hook) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
// Fire all the hooks for the passed level. Used by `entry.log` to fire
|
||||
|
// appropriate hooks for a log entry.
|
||||
|
func (hooks LevelHooks) Fire(level Level, entry *Entry) error { |
||||
|
for _, hook := range hooks[level] { |
||||
|
if err := hook.Fire(entry); err != nil { |
||||
|
return err |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
return nil |
||||
|
} |
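A hook only needs the two methods of the `Hook` interface above. A minimal sketch of a hook that mirrors error-and-above entries to stderr; the `stderrHook` name is hypothetical, and `entry.String()` is the same entry-serialisation helper the syslog hook later in this commit relies on:

```go
package main

import (
	"fmt"
	"os"

	"github.com/Sirupsen/logrus"
)

// stderrHook mirrors matching entries to stderr, wherever log.Out points.
type stderrHook struct{}

// Levels restricts the hook to error-and-above entries.
func (h *stderrHook) Levels() []logrus.Level {
	return []logrus.Level{logrus.PanicLevel, logrus.FatalLevel, logrus.ErrorLevel}
}

// Fire is called once per matching entry with the fully populated *Entry.
func (h *stderrHook) Fire(entry *logrus.Entry) error {
	line, err := entry.String()
	if err != nil {
		return err
	}
	fmt.Fprint(os.Stderr, line)
	return nil
}

func main() {
	log := logrus.New()
	log.Hooks.Add(&stderrHook{})
	log.Error("something went wrong") // also mirrored to stderr by the hook
}
```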
@ -0,0 +1,39 @@ |
|||||
|
# Syslog Hooks for Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/> |
||||
|
|
||||
|
## Usage |
||||
|
|
||||
|
```go |
||||
|
import ( |
||||
|
"log/syslog" |
||||
|
"github.com/Sirupsen/logrus" |
||||
|
logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog" |
||||
|
) |
||||
|
|
||||
|
func main() { |
||||
|
log := logrus.New() |
||||
|
hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") |
||||
|
|
||||
|
if err == nil { |
||||
|
log.Hooks.Add(hook) |
||||
|
} |
||||
|
} |
||||
|
``` |
||||
|
|
||||
|
If you want to connect to the local syslog socket (e.g. "/dev/log", "/var/run/syslog" or "/var/run/log"), just assign an empty string to the first two parameters of `NewSyslogHook`. It should look like the following. |
||||
|
|
||||
|
```go |
||||
|
import ( |
||||
|
"log/syslog" |
||||
|
"github.com/Sirupsen/logrus" |
||||
|
logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog" |
||||
|
) |
||||
|
|
||||
|
func main() { |
||||
|
log := logrus.New() |
||||
|
hook, err := logrus_syslog.NewSyslogHook("", "", syslog.LOG_INFO, "") |
||||
|
|
||||
|
if err == nil { |
||||
|
log.Hooks.Add(hook) |
||||
|
} |
||||
|
} |
||||
|
``` |
@ -0,0 +1,54 @@ |
|||||
|
// +build !windows,!nacl,!plan9
|
||||
|
|
||||
|
package logrus_syslog |
||||
|
|
||||
|
import ( |
||||
|
"fmt" |
||||
|
"github.com/Sirupsen/logrus" |
||||
|
"log/syslog" |
||||
|
"os" |
||||
|
) |
||||
|
|
||||
|
// SyslogHook to send logs via syslog.
|
||||
|
type SyslogHook struct { |
||||
|
Writer *syslog.Writer |
||||
|
SyslogNetwork string |
||||
|
SyslogRaddr string |
||||
|
} |
||||
|
|
||||
|
// Creates a hook to be added to an instance of logger. This is called with
|
||||
|
// `hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_DEBUG, "")`
|
||||
|
// `if err == nil { log.Hooks.Add(hook) }`
|
||||
|
func NewSyslogHook(network, raddr string, priority syslog.Priority, tag string) (*SyslogHook, error) { |
||||
|
w, err := syslog.Dial(network, raddr, priority, tag) |
||||
|
return &SyslogHook{w, network, raddr}, err |
||||
|
} |
||||
|
|
||||
|
func (hook *SyslogHook) Fire(entry *logrus.Entry) error { |
||||
|
line, err := entry.String() |
||||
|
if err != nil { |
||||
|
fmt.Fprintf(os.Stderr, "Unable to read entry, %v", err) |
||||
|
return err |
||||
|
} |
||||
|
|
||||
|
switch entry.Level { |
||||
|
case logrus.PanicLevel: |
||||
|
return hook.Writer.Crit(line) |
||||
|
case logrus.FatalLevel: |
||||
|
return hook.Writer.Crit(line) |
||||
|
case logrus.ErrorLevel: |
||||
|
return hook.Writer.Err(line) |
||||
|
case logrus.WarnLevel: |
||||
|
return hook.Writer.Warning(line) |
||||
|
case logrus.InfoLevel: |
||||
|
return hook.Writer.Info(line) |
||||
|
case logrus.DebugLevel: |
||||
|
return hook.Writer.Debug(line) |
||||
|
default: |
||||
|
return nil |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func (hook *SyslogHook) Levels() []logrus.Level { |
||||
|
return logrus.AllLevels |
||||
|
} |
@ -0,0 +1,26 @@ |
|||||
|
package logrus_syslog |
||||
|
|
||||
|
import ( |
||||
|
"github.com/Sirupsen/logrus" |
||||
|
"log/syslog" |
||||
|
"testing" |
||||
|
) |
||||
|
|
||||
|
func TestLocalhostAddAndPrint(t *testing.T) { |
||||
|
log := logrus.New() |
||||
|
hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") |
||||
|
|
||||
|
if err != nil { |
||||
|
t.Errorf("Unable to connect to local syslog.") |
||||
|
} |
||||
|
|
||||
|
log.Hooks.Add(hook) |
||||
|
|
||||
|
for _, level := range hook.Levels() { |
||||
|
if len(log.Hooks[level]) != 1 { |
||||
|
t.Errorf("SyslogHook was not added. The length of log.Hooks[%v]: %v", level, len(log.Hooks[level])) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
log.Info("Congratulations!") |
||||
|
} |
@ -0,0 +1,67 @@ |
|||||
|
package test |
||||
|
|
||||
|
import ( |
||||
|
"io/ioutil" |
||||
|
|
||||
|
"github.com/Sirupsen/logrus" |
||||
|
) |
||||
|
|
||||
|
// test.Hook is a hook designed for dealing with logs in test scenarios.
|
||||
|
type Hook struct { |
||||
|
Entries []*logrus.Entry |
||||
|
} |
||||
|
|
||||
|
// Installs a test hook for the global logger.
|
||||
|
func NewGlobal() *Hook { |
||||
|
|
||||
|
hook := new(Hook) |
||||
|
logrus.AddHook(hook) |
||||
|
|
||||
|
return hook |
||||
|
|
||||
|
} |
||||
|
|
||||
|
// Installs a test hook for a given local logger.
|
||||
|
func NewLocal(logger *logrus.Logger) *Hook { |
||||
|
|
||||
|
hook := new(Hook) |
||||
|
logger.Hooks.Add(hook) |
||||
|
|
||||
|
return hook |
||||
|
|
||||
|
} |
||||
|
|
||||
|
// Creates a discarding logger and installs the test hook.
|
||||
|
func NewNullLogger() (*logrus.Logger, *Hook) { |
||||
|
|
||||
|
logger := logrus.New() |
||||
|
logger.Out = ioutil.Discard |
||||
|
|
||||
|
return logger, NewLocal(logger) |
||||
|
|
||||
|
} |
||||
|
|
||||
|
func (t *Hook) Fire(e *logrus.Entry) error { |
||||
|
t.Entries = append(t.Entries, e) |
||||
|
return nil |
||||
|
} |
||||
|
|
||||
|
func (t *Hook) Levels() []logrus.Level { |
||||
|
return logrus.AllLevels |
||||
|
} |
||||
|
|
||||
|
// LastEntry returns the last entry that was logged or nil.
|
||||
|
func (t *Hook) LastEntry() (l *logrus.Entry) { |
||||
|
|
||||
|
if i := len(t.Entries) - 1; i < 0 { |
||||
|
return nil |
||||
|
} else { |
||||
|
return t.Entries[i] |
||||
|
} |
||||
|
|
||||
|
} |
||||
|
|
||||
|
// Reset removes all Entries from this test hook.
|
||||
|
func (t *Hook) Reset() { |
||||
|
t.Entries = make([]*logrus.Entry, 0) |
||||
|
} |
@ -0,0 +1,39 @@ |
|||||
|
package test |
||||
|
|
||||
|
import ( |
||||
|
"testing" |
||||
|
|
||||
|
"github.com/Sirupsen/logrus" |
||||
|
"github.com/stretchr/testify/assert" |
||||
|
) |
||||
|
|
||||
|
func TestAllHooks(t *testing.T) { |
||||
|
|
||||
|
assert := assert.New(t) |
||||
|
|
||||
|
logger, hook := NewNullLogger() |
||||
|
assert.Nil(hook.LastEntry()) |
||||
|
assert.Equal(0, len(hook.Entries)) |
||||
|
|
||||
|
logger.Error("Hello error") |
||||
|
assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level) |
||||
|
assert.Equal("Hello error", hook.LastEntry().Message) |
||||
|
assert.Equal(1, len(hook.Entries)) |
||||
|
|
||||
|
logger.Warn("Hello warning") |
||||
|
assert.Equal(logrus.WarnLevel, hook.LastEntry().Level) |
||||
|
assert.Equal("Hello warning", hook.LastEntry().Message) |
||||
|
assert.Equal(2, len(hook.Entries)) |
||||
|
|
||||
|
hook.Reset() |
||||
|
assert.Nil(hook.LastEntry()) |
||||
|
assert.Equal(0, len(hook.Entries)) |
||||
|
|
||||
|
hook = NewGlobal() |
||||
|
|
||||
|
logrus.Error("Hello error") |
||||
|
assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level) |
||||
|
assert.Equal("Hello error", hook.LastEntry().Message) |
||||
|
assert.Equal(1, len(hook.Entries)) |
||||
|
|
||||
|
} |
@ -0,0 +1,41 @@ |
|||||
|
package logrus |
||||
|
|
||||
|
import ( |
||||
|
"encoding/json" |
||||
|
"fmt" |
||||
|
) |
||||
|
|
||||
|
type JSONFormatter struct { |
||||
|
// TimestampFormat sets the format used for marshaling timestamps.
|
||||
|
TimestampFormat string |
||||
|
} |
||||
|
|
||||
|
func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { |
||||
|
data := make(Fields, len(entry.Data)+3) |
||||
|
for k, v := range entry.Data { |
||||
|
switch v := v.(type) { |
||||
|
case error: |
||||
|
// Otherwise errors are ignored by `encoding/json`
|
||||
|
// https://github.com/Sirupsen/logrus/issues/137
|
||||
|
data[k] = v.Error() |
||||
|
default: |
||||
|
data[k] = v |
||||
|
} |
||||
|
} |
||||
|
prefixFieldClashes(data) |
||||
|
|
||||
|
timestampFormat := f.TimestampFormat |
||||
|
if timestampFormat == "" { |
||||
|
timestampFormat = DefaultTimestampFormat |
||||
|
} |
||||
|
|
||||
|
data["time"] = entry.Time.Format(timestampFormat) |
||||
|
data["msg"] = entry.Message |
||||
|
data["level"] = entry.Level.String() |
||||
|
|
||||
|
serialized, err := json.Marshal(data) |
||||
|
if err != nil { |
||||
|
return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) |
||||
|
} |
||||
|
return append(serialized, '\n'), nil |
||||
|
} |
@ -0,0 +1,120 @@ |
|||||
|
package logrus |
||||
|
|
||||
|
import ( |
||||
|
"encoding/json" |
||||
|
"errors" |
||||
|
|
||||
|
"testing" |
||||
|
) |
||||
|
|
||||
|
func TestErrorNotLost(t *testing.T) { |
||||
|
formatter := &JSONFormatter{} |
||||
|
|
||||
|
b, err := formatter.Format(WithField("error", errors.New("wild walrus"))) |
||||
|
if err != nil { |
||||
|
t.Fatal("Unable to format entry: ", err) |
||||
|
} |
||||
|
|
||||
|
entry := make(map[string]interface{}) |
||||
|
err = json.Unmarshal(b, &entry) |
||||
|
if err != nil { |
||||
|
t.Fatal("Unable to unmarshal formatted entry: ", err) |
||||
|
} |
||||
|
|
||||
|
if entry["error"] != "wild walrus" { |
||||
|
t.Fatal("Error field not set") |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func TestErrorNotLostOnFieldNotNamedError(t *testing.T) { |
||||
|
formatter := &JSONFormatter{} |
||||
|
|
||||
|
b, err := formatter.Format(WithField("omg", errors.New("wild walrus"))) |
||||
|
if err != nil { |
||||
|
t.Fatal("Unable to format entry: ", err) |
||||
|
} |
||||
|
|
||||
|
entry := make(map[string]interface{}) |
||||
|
err = json.Unmarshal(b, &entry) |
||||
|
if err != nil { |
||||
|
t.Fatal("Unable to unmarshal formatted entry: ", err) |
||||
|
} |
||||
|
|
||||
|
if entry["omg"] != "wild walrus" { |
||||
|
t.Fatal("Error field not set") |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func TestFieldClashWithTime(t *testing.T) { |
||||
|
formatter := &JSONFormatter{} |
||||
|
|
||||
|
b, err := formatter.Format(WithField("time", "right now!")) |
||||
|
if err != nil { |
||||
|
t.Fatal("Unable to format entry: ", err) |
||||
|
} |
||||
|
|
||||
|
entry := make(map[string]interface{}) |
||||
|
err = json.Unmarshal(b, &entry) |
||||
|
if err != nil { |
||||
|
t.Fatal("Unable to unmarshal formatted entry: ", err) |
||||
|
} |
||||
|
|
||||
|
if entry["fields.time"] != "right now!" { |
||||
|
t.Fatal("fields.time not set to original time field") |
||||
|
} |
||||
|
|
||||
|
if entry["time"] != "0001-01-01T00:00:00Z" { |
||||
|
t.Fatal("time field not set to current time, was: ", entry["time"]) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func TestFieldClashWithMsg(t *testing.T) { |
||||
|
formatter := &JSONFormatter{} |
||||
|
|
||||
|
b, err := formatter.Format(WithField("msg", "something")) |
||||
|
if err != nil { |
||||
|
t.Fatal("Unable to format entry: ", err) |
||||
|
} |
||||
|
|
||||
|
entry := make(map[string]interface{}) |
||||
|
err = json.Unmarshal(b, &entry) |
||||
|
if err != nil { |
||||
|
t.Fatal("Unable to unmarshal formatted entry: ", err) |
||||
|
} |
||||
|
|
||||
|
if entry["fields.msg"] != "something" { |
||||
|
t.Fatal("fields.msg not set to original msg field") |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func TestFieldClashWithLevel(t *testing.T) { |
||||
|
formatter := &JSONFormatter{} |
||||
|
|
||||
|
b, err := formatter.Format(WithField("level", "something")) |
||||
|
if err != nil { |
||||
|
t.Fatal("Unable to format entry: ", err) |
||||
|
} |
||||
|
|
||||
|
entry := make(map[string]interface{}) |
||||
|
err = json.Unmarshal(b, &entry) |
||||
|
if err != nil { |
||||
|
t.Fatal("Unable to unmarshal formatted entry: ", err) |
||||
|
} |
||||
|
|
||||
|
if entry["fields.level"] != "something" { |
||||
|
t.Fatal("fields.level not set to original level field") |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func TestJSONEntryEndsWithNewline(t *testing.T) { |
||||
|
formatter := &JSONFormatter{} |
||||
|
|
||||
|
b, err := formatter.Format(WithField("level", "something")) |
||||
|
if err != nil { |
||||
|
t.Fatal("Unable to format entry: ", err) |
||||
|
} |
||||
|
|
||||
|
if b[len(b)-1] != '\n' { |
||||
|
t.Fatal("Expected JSON log entry to end with a newline") |
||||
|
} |
||||
|
} |
@ -0,0 +1,212 @@ |
|||||
|
package logrus |
||||
|
|
||||
|
import ( |
||||
|
"io" |
||||
|
"os" |
||||
|
"sync" |
||||
|
) |
||||
|
|
||||
|
type Logger struct { |
||||
|
// The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
|
||||
|
// file, or leave it default which is `os.Stderr`. You can also set this to
|
||||
|
// something more adventurous, such as logging to Kafka.
|
||||
|
Out io.Writer |
||||
|
// Hooks for the logger instance. These allow firing events based on logging
|
||||
|
// levels and log entries. For example, to send errors to an error tracking
|
||||
|
// service, log to StatsD or dump the core on fatal errors.
|
||||
|
Hooks LevelHooks |
||||
|
// All log entries pass through the formatter before logged to Out. The
|
||||
|
// included formatters are `TextFormatter` and `JSONFormatter` for which
|
||||
|
// TextFormatter is the default. In development (when a TTY is attached) it
|
||||
|
// logs with colors, but to a file it wouldn't. You can easily implement your
|
||||
|
// own that implements the `Formatter` interface, see the `README` or included
|
||||
|
// formatters for examples.
|
||||
|
Formatter Formatter |
||||
|
// The logging level the logger should log at. This is typically (and defaults
|
||||
|
// to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
|
||||
|
// logged. `logrus.Debug` is useful in development.
|
||||
|
Level Level |
||||
|
// Used to sync writing to the log.
|
||||
|
mu sync.Mutex |
||||
|
} |
||||
|
|
||||
|
// Creates a new logger. Configuration should be set by changing `Formatter`,
|
||||
|
// `Out` and `Hooks` directly on the default logger instance. You can also just
|
||||
|
// instantiate your own:
|
||||
|
//
|
||||
|
// var log = &Logger{
|
||||
|
// Out: os.Stderr,
|
||||
|
// Formatter: new(JSONFormatter),
|
||||
|
// Hooks: make(LevelHooks),
|
||||
|
// Level: logrus.DebugLevel,
|
||||
|
// }
|
||||
|
//
|
||||
|
// It's recommended to make this a global instance called `log`.
|
||||
|
func New() *Logger { |
||||
|
return &Logger{ |
||||
|
Out: os.Stderr, |
||||
|
Formatter: new(TextFormatter), |
||||
|
Hooks: make(LevelHooks), |
||||
|
Level: InfoLevel, |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
// Adds a field to the log entry, note that it doesn't log until you call
|
||||
|
// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry.
|
||||
|
// If you want multiple fields, use `WithFields`.
|
||||
|
func (logger *Logger) WithField(key string, value interface{}) *Entry { |
||||
|
return NewEntry(logger).WithField(key, value) |
||||
|
} |
||||
|
|
||||
|
// Adds a struct of fields to the log entry. All it does is call `WithField` for
|
||||
|
// each `Field`.
|
||||
|
func (logger *Logger) WithFields(fields Fields) *Entry { |
||||
|
return NewEntry(logger).WithFields(fields) |
||||
|
} |
||||
|
|
||||
|
// Add an error as single field to the log entry. All it does is call
|
||||
|
// `WithError` for the given `error`.
|
||||
|
func (logger *Logger) WithError(err error) *Entry { |
||||
|
return NewEntry(logger).WithError(err) |
||||
|
} |
||||
|
|
||||
|
func (logger *Logger) Debugf(format string, args ...interface{}) { |
||||
|
if logger.Level >= DebugLevel { |
||||
|
NewEntry(logger).Debugf(format, args...) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func (logger *Logger) Infof(format string, args ...interface{}) { |
||||
|
if logger.Level >= InfoLevel { |
||||
|
NewEntry(logger).Infof(format, args...) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func (logger *Logger) Printf(format string, args ...interface{}) { |
||||
|
NewEntry(logger).Printf(format, args...) |
||||
|
} |
||||
|
|
||||
|
func (logger *Logger) Warnf(format string, args ...interface{}) { |
||||
|
if logger.Level >= WarnLevel { |
||||
|
NewEntry(logger).Warnf(format, args...) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func (logger *Logger) Warningf(format string, args ...interface{}) { |
||||
|
if logger.Level >= WarnLevel { |
||||
|
NewEntry(logger).Warnf(format, args...) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func (logger *Logger) Errorf(format string, args ...interface{}) { |
||||
|
if logger.Level >= ErrorLevel { |
||||
|
NewEntry(logger).Errorf(format, args...) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func (logger *Logger) Fatalf(format string, args ...interface{}) { |
||||
|
if logger.Level >= FatalLevel { |
||||
|
NewEntry(logger).Fatalf(format, args...) |
||||
|
} |
||||
|
Exit(1) |
||||
|
} |
||||
|
|
||||
|
func (logger *Logger) Panicf(format string, args ...interface{}) { |
||||
|
if logger.Level >= PanicLevel { |
||||
|
NewEntry(logger).Panicf(format, args...) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func (logger *Logger) Debug(args ...interface{}) { |
||||
|
if logger.Level >= DebugLevel { |
||||
|
NewEntry(logger).Debug(args...) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func (logger *Logger) Info(args ...interface{}) { |
||||
|
if logger.Level >= InfoLevel { |
||||
|
NewEntry(logger).Info(args...) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func (logger *Logger) Print(args ...interface{}) { |
||||
|
NewEntry(logger).Info(args...) |
||||
|
} |
||||
|
|
||||
|
func (logger *Logger) Warn(args ...interface{}) { |
||||
|
if logger.Level >= WarnLevel { |
||||
|
NewEntry(logger).Warn(args...) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func (logger *Logger) Warning(args ...interface{}) { |
||||
|
if logger.Level >= WarnLevel { |
||||
|
NewEntry(logger).Warn(args...) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func (logger *Logger) Error(args ...interface{}) { |
||||
|
if logger.Level >= ErrorLevel { |
||||
|
NewEntry(logger).Error(args...) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func (logger *Logger) Fatal(args ...interface{}) { |
||||
|
if logger.Level >= FatalLevel { |
||||
|
NewEntry(logger).Fatal(args...) |
||||
|
} |
||||
|
Exit(1) |
||||
|
} |
||||
|
|
||||
|
func (logger *Logger) Panic(args ...interface{}) { |
||||
|
if logger.Level >= PanicLevel { |
||||
|
NewEntry(logger).Panic(args...) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func (logger *Logger) Debugln(args ...interface{}) { |
||||
|
if logger.Level >= DebugLevel { |
||||
|
NewEntry(logger).Debugln(args...) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func (logger *Logger) Infoln(args ...interface{}) { |
||||
|
if logger.Level >= InfoLevel { |
||||
|
NewEntry(logger).Infoln(args...) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func (logger *Logger) Println(args ...interface{}) { |
||||
|
NewEntry(logger).Println(args...) |
||||
|
} |
||||
|
|
||||
|
func (logger *Logger) Warnln(args ...interface{}) { |
||||
|
if logger.Level >= WarnLevel { |
||||
|
NewEntry(logger).Warnln(args...) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func (logger *Logger) Warningln(args ...interface{}) { |
||||
|
if logger.Level >= WarnLevel { |
||||
|
NewEntry(logger).Warnln(args...) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func (logger *Logger) Errorln(args ...interface{}) { |
||||
|
if logger.Level >= ErrorLevel { |
||||
|
NewEntry(logger).Errorln(args...) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func (logger *Logger) Fatalln(args ...interface{}) { |
||||
|
if logger.Level >= FatalLevel { |
||||
|
NewEntry(logger).Fatalln(args...) |
||||
|
} |
||||
|
Exit(1) |
||||
|
} |
||||
|
|
||||
|
func (logger *Logger) Panicln(args ...interface{}) { |
||||
|
if logger.Level >= PanicLevel { |
||||
|
NewEntry(logger).Panicln(args...) |
||||
|
} |
||||
|
} |
@ -0,0 +1,143 @@ |
|||||
|
package logrus |
||||
|
|
||||
|
import ( |
||||
|
"fmt" |
||||
|
"log" |
||||
|
"strings" |
||||
|
) |
||||
|
|
||||
|
// Fields type, used to pass to `WithFields`.
|
||||
|
type Fields map[string]interface{} |
||||
|
|
||||
|
// Level type
|
||||
|
type Level uint8 |
||||
|
|
||||
|
// Convert the Level to a string. E.g. PanicLevel becomes "panic".
|
||||
|
func (level Level) String() string { |
||||
|
switch level { |
||||
|
case DebugLevel: |
||||
|
return "debug" |
||||
|
case InfoLevel: |
||||
|
return "info" |
||||
|
case WarnLevel: |
||||
|
return "warning" |
||||
|
case ErrorLevel: |
||||
|
return "error" |
||||
|
case FatalLevel: |
||||
|
return "fatal" |
||||
|
case PanicLevel: |
||||
|
return "panic" |
||||
|
} |
||||
|
|
||||
|
return "unknown" |
||||
|
} |
||||
|
|
||||
|
// ParseLevel takes a string level and returns the Logrus log level constant.
|
||||
|
func ParseLevel(lvl string) (Level, error) { |
||||
|
switch strings.ToLower(lvl) { |
||||
|
case "panic": |
||||
|
return PanicLevel, nil |
||||
|
case "fatal": |
||||
|
return FatalLevel, nil |
||||
|
case "error": |
||||
|
return ErrorLevel, nil |
||||
|
case "warn", "warning": |
||||
|
return WarnLevel, nil |
||||
|
case "info": |
||||
|
return InfoLevel, nil |
||||
|
case "debug": |
||||
|
return DebugLevel, nil |
||||
|
} |
||||
|
|
||||
|
var l Level |
||||
|
return l, fmt.Errorf("not a valid logrus Level: %q", lvl) |
||||
|
} |
||||
|
|
||||
|
// A constant exposing all logging levels
|
||||
|
var AllLevels = []Level{ |
||||
|
PanicLevel, |
||||
|
FatalLevel, |
||||
|
ErrorLevel, |
||||
|
WarnLevel, |
||||
|
InfoLevel, |
||||
|
DebugLevel, |
||||
|
} |
||||
|
|
||||
|
// These are the different logging levels. You can set the logging level to log
|
||||
|
// on your instance of logger, obtained with `logrus.New()`.
|
||||
|
const ( |
||||
|
// PanicLevel level, highest level of severity. Logs and then calls panic with the
|
||||
|
// message passed to Debug, Info, ...
|
||||
|
PanicLevel Level = iota |
||||
|
// FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the
|
||||
|
// logging level is set to Panic.
|
||||
|
FatalLevel |
||||
|
// ErrorLevel level. Logs. Used for errors that should definitely be noted.
|
||||
|
// Commonly used for hooks to send errors to an error tracking service.
|
||||
|
ErrorLevel |
||||
|
// WarnLevel level. Non-critical entries that deserve eyes.
|
||||
|
WarnLevel |
||||
|
// InfoLevel level. General operational entries about what's going on inside the
|
||||
|
// application.
|
||||
|
InfoLevel |
||||
|
// DebugLevel level. Usually only enabled when debugging. Very verbose logging.
|
||||
|
DebugLevel |
||||
|
) |
||||
|
|
||||
|
// Won't compile if StdLogger can't be realized by a log.Logger
|
||||
|
var ( |
||||
|
_ StdLogger = &log.Logger{} |
||||
|
_ StdLogger = &Entry{} |
||||
|
_ StdLogger = &Logger{} |
||||
|
) |
||||
|
|
||||
|
// StdLogger is what your logrus-enabled library should take, that way
|
||||
|
// it'll accept a stdlib logger and a logrus logger. There's no standard
|
||||
|
// interface; this is the closest we get, unfortunately.
|
||||
|
type StdLogger interface { |
||||
|
Print(...interface{}) |
||||
|
Printf(string, ...interface{}) |
||||
|
Println(...interface{}) |
||||
|
|
||||
|
Fatal(...interface{}) |
||||
|
Fatalf(string, ...interface{}) |
||||
|
Fatalln(...interface{}) |
||||
|
|
||||
|
Panic(...interface{}) |
||||
|
Panicf(string, ...interface{}) |
||||
|
Panicln(...interface{}) |
||||
|
} |
||||
|
|
||||
|
// The FieldLogger interface generalizes the Entry and Logger types
|
||||
|
type FieldLogger interface { |
||||
|
WithField(key string, value interface{}) *Entry |
||||
|
WithFields(fields Fields) *Entry |
||||
|
WithError(err error) *Entry |
||||
|
|
||||
|
Debugf(format string, args ...interface{}) |
||||
|
Infof(format string, args ...interface{}) |
||||
|
Printf(format string, args ...interface{}) |
||||
|
Warnf(format string, args ...interface{}) |
||||
|
Warningf(format string, args ...interface{}) |
||||
|
Errorf(format string, args ...interface{}) |
||||
|
Fatalf(format string, args ...interface{}) |
||||
|
Panicf(format string, args ...interface{}) |
||||
|
|
||||
|
Debug(args ...interface{}) |
||||
|
Info(args ...interface{}) |
||||
|
Print(args ...interface{}) |
||||
|
Warn(args ...interface{}) |
||||
|
Warning(args ...interface{}) |
||||
|
Error(args ...interface{}) |
||||
|
Fatal(args ...interface{}) |
||||
|
Panic(args ...interface{}) |
||||
|
|
||||
|
Debugln(args ...interface{}) |
||||
|
Infoln(args ...interface{}) |
||||
|
Println(args ...interface{}) |
||||
|
Warnln(args ...interface{}) |
||||
|
Warningln(args ...interface{}) |
||||
|
Errorln(args ...interface{}) |
||||
|
Fatalln(args ...interface{}) |
||||
|
Panicln(args ...interface{}) |
||||
|
} |
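`ParseLevel` pairs naturally with configuration or command-line flags, since it accepts (case-insensitively) the same names that `Level.String()` produces, plus "warn". A minimal sketch, with the flag name chosen purely for illustration:

```go
package main

import (
	"flag"

	"github.com/Sirupsen/logrus"
)

func main() {
	// Take the level as a flag and fail fast on typos.
	levelName := flag.String("log-level", "info", "panic|fatal|error|warn|info|debug")
	flag.Parse()

	level, err := logrus.ParseLevel(*levelName)
	if err != nil {
		logrus.Fatalf("invalid log level %q: %v", *levelName, err)
	}
	logrus.SetLevel(level)
	logrus.Debug("debug logging enabled") // emitted only when -log-level=debug
}
```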
@ -0,0 +1,361 @@ |
|||||
|
package logrus |
||||
|
|
||||
|
import ( |
||||
|
"bytes" |
||||
|
"encoding/json" |
||||
|
"strconv" |
||||
|
"strings" |
||||
|
"sync" |
||||
|
"testing" |
||||
|
|
||||
|
"github.com/stretchr/testify/assert" |
||||
|
) |
||||
|
|
||||
|
func LogAndAssertJSON(t *testing.T, log func(*Logger), assertions func(fields Fields)) { |
||||
|
var buffer bytes.Buffer |
||||
|
var fields Fields |
||||
|
|
||||
|
logger := New() |
||||
|
logger.Out = &buffer |
||||
|
logger.Formatter = new(JSONFormatter) |
||||
|
|
||||
|
log(logger) |
||||
|
|
||||
|
err := json.Unmarshal(buffer.Bytes(), &fields) |
||||
|
assert.Nil(t, err) |
||||
|
|
||||
|
assertions(fields) |
||||
|
} |
||||
|
|
||||
|
func LogAndAssertText(t *testing.T, log func(*Logger), assertions func(fields map[string]string)) { |
||||
|
var buffer bytes.Buffer |
||||
|
|
||||
|
logger := New() |
||||
|
logger.Out = &buffer |
||||
|
logger.Formatter = &TextFormatter{ |
||||
|
DisableColors: true, |
||||
|
} |
||||
|
|
||||
|
log(logger) |
||||
|
|
||||
|
fields := make(map[string]string) |
||||
|
for _, kv := range strings.Split(buffer.String(), " ") { |
||||
|
if !strings.Contains(kv, "=") { |
||||
|
continue |
||||
|
} |
||||
|
kvArr := strings.Split(kv, "=") |
||||
|
key := strings.TrimSpace(kvArr[0]) |
||||
|
val := kvArr[1] |
||||
|
if kvArr[1][0] == '"' { |
||||
|
var err error |
||||
|
val, err = strconv.Unquote(val) |
||||
|
assert.NoError(t, err) |
||||
|
} |
||||
|
fields[key] = val |
||||
|
} |
||||
|
assertions(fields) |
||||
|
} |
||||
|
|
||||
|
func TestPrint(t *testing.T) { |
||||
|
LogAndAssertJSON(t, func(log *Logger) { |
||||
|
log.Print("test") |
||||
|
}, func(fields Fields) { |
||||
|
assert.Equal(t, fields["msg"], "test") |
||||
|
assert.Equal(t, fields["level"], "info") |
||||
|
}) |
||||
|
} |
||||
|
|
||||
|
func TestInfo(t *testing.T) { |
||||
|
LogAndAssertJSON(t, func(log *Logger) { |
||||
|
log.Info("test") |
||||
|
}, func(fields Fields) { |
||||
|
assert.Equal(t, fields["msg"], "test") |
||||
|
assert.Equal(t, fields["level"], "info") |
||||
|
}) |
||||
|
} |
||||
|
|
||||
|
func TestWarn(t *testing.T) { |
||||
|
LogAndAssertJSON(t, func(log *Logger) { |
||||
|
log.Warn("test") |
||||
|
}, func(fields Fields) { |
||||
|
assert.Equal(t, fields["msg"], "test") |
||||
|
assert.Equal(t, fields["level"], "warning") |
||||
|
}) |
||||
|
} |
||||
|
|
||||
|
func TestInfolnShouldAddSpacesBetweenStrings(t *testing.T) { |
||||
|
LogAndAssertJSON(t, func(log *Logger) { |
||||
|
log.Infoln("test", "test") |
||||
|
}, func(fields Fields) { |
||||
|
assert.Equal(t, fields["msg"], "test test") |
||||
|
}) |
||||
|
} |
||||
|
|
||||
|
func TestInfolnShouldAddSpacesBetweenStringAndNonstring(t *testing.T) { |
||||
|
LogAndAssertJSON(t, func(log *Logger) { |
||||
|
log.Infoln("test", 10) |
||||
|
}, func(fields Fields) { |
||||
|
assert.Equal(t, fields["msg"], "test 10") |
||||
|
}) |
||||
|
} |
||||
|
|
||||
|
func TestInfolnShouldAddSpacesBetweenTwoNonStrings(t *testing.T) { |
||||
|
LogAndAssertJSON(t, func(log *Logger) { |
||||
|
log.Infoln(10, 10) |
||||
|
}, func(fields Fields) { |
||||
|
assert.Equal(t, fields["msg"], "10 10") |
||||
|
}) |
||||
|
} |
||||
|
|
||||
|
func TestInfoShouldAddSpacesBetweenTwoNonStrings(t *testing.T) { |
||||
|
LogAndAssertJSON(t, func(log *Logger) { |
||||
|
log.Infoln(10, 10) |
||||
|
}, func(fields Fields) { |
||||
|
assert.Equal(t, fields["msg"], "10 10") |
||||
|
}) |
||||
|
} |
||||
|
|
||||
|
func TestInfoShouldNotAddSpacesBetweenStringAndNonstring(t *testing.T) { |
||||
|
LogAndAssertJSON(t, func(log *Logger) { |
||||
|
log.Info("test", 10) |
||||
|
}, func(fields Fields) { |
||||
|
assert.Equal(t, fields["msg"], "test10") |
||||
|
}) |
||||
|
} |
||||
|
|
||||
|
func TestInfoShouldNotAddSpacesBetweenStrings(t *testing.T) { |
||||
|
LogAndAssertJSON(t, func(log *Logger) { |
||||
|
log.Info("test", "test") |
||||
|
}, func(fields Fields) { |
||||
|
assert.Equal(t, fields["msg"], "testtest") |
||||
|
}) |
||||
|
} |
||||
|
|
||||
|
func TestWithFieldsShouldAllowAssignments(t *testing.T) { |
||||
|
var buffer bytes.Buffer |
||||
|
var fields Fields |
||||
|
|
||||
|
logger := New() |
||||
|
logger.Out = &buffer |
||||
|
logger.Formatter = new(JSONFormatter) |
||||
|
|
||||
|
localLog := logger.WithFields(Fields{ |
||||
|
"key1": "value1", |
||||
|
}) |
||||
|
|
||||
|
localLog.WithField("key2", "value2").Info("test") |
||||
|
err := json.Unmarshal(buffer.Bytes(), &fields) |
||||
|
assert.Nil(t, err) |
||||
|
|
||||
|
assert.Equal(t, "value2", fields["key2"]) |
||||
|
assert.Equal(t, "value1", fields["key1"]) |
||||
|
|
||||
|
buffer = bytes.Buffer{} |
||||
|
fields = Fields{} |
||||
|
localLog.Info("test") |
||||
|
err = json.Unmarshal(buffer.Bytes(), &fields) |
||||
|
assert.Nil(t, err) |
||||
|
|
||||
|
_, ok := fields["key2"] |
||||
|
assert.Equal(t, false, ok) |
||||
|
assert.Equal(t, "value1", fields["key1"]) |
||||
|
} |
||||
|
|
||||
|
func TestUserSuppliedFieldDoesNotOverwriteDefaults(t *testing.T) { |
||||
|
LogAndAssertJSON(t, func(log *Logger) { |
||||
|
log.WithField("msg", "hello").Info("test") |
||||
|
}, func(fields Fields) { |
||||
|
assert.Equal(t, fields["msg"], "test") |
||||
|
}) |
||||
|
} |
||||
|
|
||||
|
func TestUserSuppliedMsgFieldHasPrefix(t *testing.T) { |
||||
|
LogAndAssertJSON(t, func(log *Logger) { |
||||
|
log.WithField("msg", "hello").Info("test") |
||||
|
}, func(fields Fields) { |
||||
|
assert.Equal(t, fields["msg"], "test") |
||||
|
assert.Equal(t, fields["fields.msg"], "hello") |
||||
|
}) |
||||
|
} |
||||
|
|
||||
|
func TestUserSuppliedTimeFieldHasPrefix(t *testing.T) { |
||||
|
LogAndAssertJSON(t, func(log *Logger) { |
||||
|
log.WithField("time", "hello").Info("test") |
||||
|
}, func(fields Fields) { |
||||
|
assert.Equal(t, fields["fields.time"], "hello") |
||||
|
}) |
||||
|
} |
||||
|
|
||||
|
func TestUserSuppliedLevelFieldHasPrefix(t *testing.T) { |
||||
|
LogAndAssertJSON(t, func(log *Logger) { |
||||
|
log.WithField("level", 1).Info("test") |
||||
|
}, func(fields Fields) { |
||||
|
assert.Equal(t, fields["level"], "info") |
||||
|
assert.Equal(t, fields["fields.level"], 1.0) // JSON has floats only
|
||||
|
}) |
||||
|
} |
||||
|
|
||||
|
func TestDefaultFieldsAreNotPrefixed(t *testing.T) { |
||||
|
LogAndAssertText(t, func(log *Logger) { |
||||
|
ll := log.WithField("herp", "derp") |
||||
|
ll.Info("hello") |
||||
|
ll.Info("bye") |
||||
|
}, func(fields map[string]string) { |
||||
|
for _, fieldName := range []string{"fields.level", "fields.time", "fields.msg"} { |
||||
|
if _, ok := fields[fieldName]; ok { |
||||
|
t.Fatalf("should not have prefixed %q: %v", fieldName, fields) |
||||
|
} |
||||
|
} |
||||
|
}) |
||||
|
} |
||||
|
|
||||
|
func TestDoubleLoggingDoesntPrefixPreviousFields(t *testing.T) { |
||||
|
|
||||
|
var buffer bytes.Buffer |
||||
|
var fields Fields |
||||
|
|
||||
|
logger := New() |
||||
|
logger.Out = &buffer |
||||
|
logger.Formatter = new(JSONFormatter) |
||||
|
|
||||
|
llog := logger.WithField("context", "eating raw fish") |
||||
|
|
||||
|
llog.Info("looks delicious") |
||||
|
|
||||
|
err := json.Unmarshal(buffer.Bytes(), &fields) |
||||
|
assert.NoError(t, err, "should have decoded first message") |
||||
|
assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields") |
||||
|
assert.Equal(t, fields["msg"], "looks delicious") |
||||
|
assert.Equal(t, fields["context"], "eating raw fish") |
||||
|
|
||||
|
buffer.Reset() |
||||
|
|
||||
|
llog.Warn("omg it is!") |
||||
|
|
||||
|
err = json.Unmarshal(buffer.Bytes(), &fields) |
||||
|
assert.NoError(t, err, "should have decoded second message") |
||||
|
assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields") |
||||
|
assert.Equal(t, fields["msg"], "omg it is!") |
||||
|
assert.Equal(t, fields["context"], "eating raw fish") |
||||
|
assert.Nil(t, fields["fields.msg"], "should not have prefixed previous `msg` entry") |
||||
|
|
||||
|
} |
||||
|
|
||||
|
func TestConvertLevelToString(t *testing.T) { |
||||
|
assert.Equal(t, "debug", DebugLevel.String()) |
||||
|
assert.Equal(t, "info", InfoLevel.String()) |
||||
|
assert.Equal(t, "warning", WarnLevel.String()) |
||||
|
assert.Equal(t, "error", ErrorLevel.String()) |
||||
|
assert.Equal(t, "fatal", FatalLevel.String()) |
||||
|
assert.Equal(t, "panic", PanicLevel.String()) |
||||
|
} |
||||
|
|
||||
|
func TestParseLevel(t *testing.T) { |
||||
|
l, err := ParseLevel("panic") |
||||
|
assert.Nil(t, err) |
||||
|
assert.Equal(t, PanicLevel, l) |
||||
|
|
||||
|
l, err = ParseLevel("PANIC") |
||||
|
assert.Nil(t, err) |
||||
|
assert.Equal(t, PanicLevel, l) |
||||
|
|
||||
|
l, err = ParseLevel("fatal") |
||||
|
assert.Nil(t, err) |
||||
|
assert.Equal(t, FatalLevel, l) |
||||
|
|
||||
|
l, err = ParseLevel("FATAL") |
||||
|
assert.Nil(t, err) |
||||
|
assert.Equal(t, FatalLevel, l) |
||||
|
|
||||
|
l, err = ParseLevel("error") |
||||
|
assert.Nil(t, err) |
||||
|
assert.Equal(t, ErrorLevel, l) |
||||
|
|
||||
|
l, err = ParseLevel("ERROR") |
||||
|
assert.Nil(t, err) |
||||
|
assert.Equal(t, ErrorLevel, l) |
||||
|
|
||||
|
l, err = ParseLevel("warn") |
||||
|
assert.Nil(t, err) |
||||
|
assert.Equal(t, WarnLevel, l) |
||||
|
|
||||
|
l, err = ParseLevel("WARN") |
||||
|
assert.Nil(t, err) |
||||
|
assert.Equal(t, WarnLevel, l) |
||||
|
|
||||
|
l, err = ParseLevel("warning") |
||||
|
assert.Nil(t, err) |
||||
|
assert.Equal(t, WarnLevel, l) |
||||
|
|
||||
|
l, err = ParseLevel("WARNING") |
||||
|
assert.Nil(t, err) |
||||
|
assert.Equal(t, WarnLevel, l) |
||||
|
|
||||
|
l, err = ParseLevel("info") |
||||
|
assert.Nil(t, err) |
||||
|
assert.Equal(t, InfoLevel, l) |
||||
|
|
||||
|
l, err = ParseLevel("INFO") |
||||
|
assert.Nil(t, err) |
||||
|
assert.Equal(t, InfoLevel, l) |
||||
|
|
||||
|
l, err = ParseLevel("debug") |
||||
|
assert.Nil(t, err) |
||||
|
assert.Equal(t, DebugLevel, l) |
||||
|
|
||||
|
l, err = ParseLevel("DEBUG") |
||||
|
assert.Nil(t, err) |
||||
|
assert.Equal(t, DebugLevel, l) |
||||
|
|
||||
|
l, err = ParseLevel("invalid") |
||||
|
assert.Equal(t, "not a valid logrus Level: \"invalid\"", err.Error()) |
||||
|
} |
||||
|
|
||||
|
func TestGetSetLevelRace(t *testing.T) { |
||||
|
wg := sync.WaitGroup{} |
||||
|
for i := 0; i < 100; i++ { |
||||
|
wg.Add(1) |
||||
|
go func(i int) { |
||||
|
defer wg.Done() |
||||
|
if i%2 == 0 { |
||||
|
SetLevel(InfoLevel) |
||||
|
} else { |
||||
|
GetLevel() |
||||
|
} |
||||
|
}(i) |
||||
|
|
||||
|
} |
||||
|
wg.Wait() |
||||
|
} |
||||
|
|
||||
|
func TestLoggingRace(t *testing.T) { |
||||
|
logger := New() |
||||
|
|
||||
|
var wg sync.WaitGroup |
||||
|
wg.Add(100) |
||||
|
|
||||
|
for i := 0; i < 100; i++ { |
||||
|
go func() { |
||||
|
logger.Info("info") |
||||
|
wg.Done() |
||||
|
}() |
||||
|
} |
||||
|
wg.Wait() |
||||
|
} |
||||
|
|
||||
|
// Compile test
|
||||
|
func TestLogrusInterface(t *testing.T) { |
||||
|
var buffer bytes.Buffer |
||||
|
fn := func(l FieldLogger) { |
||||
|
b := l.WithField("key", "value") |
||||
|
b.Debug("Test") |
||||
|
} |
||||
|
// test logger
|
||||
|
logger := New() |
||||
|
logger.Out = &buffer |
||||
|
fn(logger) |
||||
|
|
||||
|
// test Entry
|
||||
|
e := logger.WithField("another", "value") |
||||
|
fn(e) |
||||
|
} |
@ -0,0 +1,9 @@ |
|||||
|
// +build darwin freebsd openbsd netbsd dragonfly
|
||||
|
|
||||
|
package logrus |
||||
|
|
||||
|
import "syscall" |
||||
|
|
||||
|
const ioctlReadTermios = syscall.TIOCGETA |
||||
|
|
||||
|
type Termios syscall.Termios |
@ -0,0 +1,12 @@ |
|||||
|
// Based on ssh/terminal:
|
||||
|
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
|
// Use of this source code is governed by a BSD-style
|
||||
|
// license that can be found in the LICENSE file.
|
||||
|
|
||||
|
package logrus |
||||
|
|
||||
|
import "syscall" |
||||
|
|
||||
|
const ioctlReadTermios = syscall.TCGETS |
||||
|
|
||||
|
type Termios syscall.Termios |
@ -0,0 +1,21 @@ |
|||||
|
// Based on ssh/terminal:
|
||||
|
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
|
// Use of this source code is governed by a BSD-style
|
||||
|
// license that can be found in the LICENSE file.
|
||||
|
|
||||
|
// +build linux darwin freebsd openbsd netbsd dragonfly
|
||||
|
|
||||
|
package logrus |
||||
|
|
||||
|
import ( |
||||
|
"syscall" |
||||
|
"unsafe" |
||||
|
) |
||||
|
|
||||
|
// IsTerminal returns true if stderr's file descriptor is a terminal.
|
||||
|
func IsTerminal() bool { |
||||
|
fd := syscall.Stderr |
||||
|
var termios Termios |
||||
|
_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) |
||||
|
return err == 0 |
||||
|
} |
@ -0,0 +1,15 @@ |
|||||
|
// +build solaris
|
||||
|
|
||||
|
package logrus |
||||
|
|
||||
|
import ( |
||||
|
"os" |
||||
|
|
||||
|
"golang.org/x/sys/unix" |
||||
|
) |
||||
|
|
||||
|
// IsTerminal returns true if stdout's file descriptor is a terminal.
|
||||
|
func IsTerminal() bool { |
||||
|
_, err := unix.IoctlGetTermios(int(os.Stdout.Fd()), unix.TCGETA) |
||||
|
return err == nil |
||||
|
} |
@ -0,0 +1,27 @@ |
|||||
|
// Based on ssh/terminal:
|
||||
|
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
|
// Use of this source code is governed by a BSD-style
|
||||
|
// license that can be found in the LICENSE file.
|
||||
|
|
||||
|
// +build windows
|
||||
|
|
||||
|
package logrus |
||||
|
|
||||
|
import ( |
||||
|
"syscall" |
||||
|
"unsafe" |
||||
|
) |
||||
|
|
||||
|
var kernel32 = syscall.NewLazyDLL("kernel32.dll") |
||||
|
|
||||
|
var ( |
||||
|
procGetConsoleMode = kernel32.NewProc("GetConsoleMode") |
||||
|
) |
||||
|
|
||||
|
// IsTerminal returns true if stderr's file descriptor is a terminal.
|
||||
|
func IsTerminal() bool { |
||||
|
fd := syscall.Stderr |
||||
|
var st uint32 |
||||
|
r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) |
||||
|
return r != 0 && e == 0 |
||||
|
} |
@ -0,0 +1,161 @@ |
|||||
|
package logrus |
||||
|
|
||||
|
import ( |
||||
|
"bytes" |
||||
|
"fmt" |
||||
|
"runtime" |
||||
|
"sort" |
||||
|
"strings" |
||||
|
"time" |
||||
|
) |
||||
|
|
||||
|
const ( |
||||
|
nocolor = 0 |
||||
|
red = 31 |
||||
|
green = 32 |
||||
|
yellow = 33 |
||||
|
blue = 34 |
||||
|
gray = 37 |
||||
|
) |
||||
|
|
||||
|
var ( |
||||
|
baseTimestamp time.Time |
||||
|
isTerminal bool |
||||
|
) |
||||
|
|
||||
|
func init() { |
||||
|
baseTimestamp = time.Now() |
||||
|
isTerminal = IsTerminal() |
||||
|
} |
||||
|
|
||||
|
func miniTS() int { |
||||
|
return int(time.Since(baseTimestamp) / time.Second) |
||||
|
} |
||||
|
|
||||
|
type TextFormatter struct { |
||||
|
// Set to true to bypass checking for a TTY before outputting colors.
|
||||
|
ForceColors bool |
||||
|
|
||||
|
// Force disabling colors.
|
||||
|
DisableColors bool |
||||
|
|
||||
|
// Disable timestamp logging. Useful when output is redirected to a logging
|
||||
|
// system that already adds timestamps.
|
||||
|
DisableTimestamp bool |
||||
|
|
||||
|
// Enable logging the full timestamp when a TTY is attached instead of just
|
||||
|
// the time passed since beginning of execution.
|
||||
|
FullTimestamp bool |
||||
|
|
||||
|
// TimestampFormat to use for display when a full timestamp is printed
|
||||
|
TimestampFormat string |
||||
|
|
||||
|
// The fields are sorted by default for a consistent output. For applications
|
||||
|
// that log extremely frequently and don't use the JSON formatter this may not
|
||||
|
// be desired.
|
||||
|
DisableSorting bool |
||||
|
} |
||||
|
|
||||
|
func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { |
||||
|
var keys []string = make([]string, 0, len(entry.Data)) |
||||
|
for k := range entry.Data { |
||||
|
keys = append(keys, k) |
||||
|
} |
||||
|
|
||||
|
if !f.DisableSorting { |
||||
|
sort.Strings(keys) |
||||
|
} |
||||
|
|
||||
|
b := &bytes.Buffer{} |
||||
|
|
||||
|
prefixFieldClashes(entry.Data) |
||||
|
|
||||
|
isColorTerminal := isTerminal && (runtime.GOOS != "windows") |
||||
|
isColored := (f.ForceColors || isColorTerminal) && !f.DisableColors |
||||
|
|
||||
|
timestampFormat := f.TimestampFormat |
||||
|
if timestampFormat == "" { |
||||
|
timestampFormat = DefaultTimestampFormat |
||||
|
} |
||||
|
if isColored { |
||||
|
f.printColored(b, entry, keys, timestampFormat) |
||||
|
} else { |
||||
|
if !f.DisableTimestamp { |
||||
|
f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat)) |
||||
|
} |
||||
|
f.appendKeyValue(b, "level", entry.Level.String()) |
||||
|
if entry.Message != "" { |
||||
|
f.appendKeyValue(b, "msg", entry.Message) |
||||
|
} |
||||
|
for _, key := range keys { |
||||
|
f.appendKeyValue(b, key, entry.Data[key]) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
b.WriteByte('\n') |
||||
|
return b.Bytes(), nil |
||||
|
} |
||||
|
|
||||
|
func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) { |
||||
|
var levelColor int |
||||
|
switch entry.Level { |
||||
|
case DebugLevel: |
||||
|
levelColor = gray |
||||
|
case WarnLevel: |
||||
|
levelColor = yellow |
||||
|
case ErrorLevel, FatalLevel, PanicLevel: |
||||
|
levelColor = red |
||||
|
default: |
||||
|
levelColor = blue |
||||
|
} |
||||
|
|
||||
|
levelText := strings.ToUpper(entry.Level.String())[0:4] |
||||
|
|
||||
|
if !f.FullTimestamp { |
||||
|
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message) |
||||
|
} else { |
||||
|
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message) |
||||
|
} |
||||
|
for _, k := range keys { |
||||
|
v := entry.Data[k] |
||||
|
fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%+v", levelColor, k, v) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func needsQuoting(text string) bool { |
||||
|
for _, ch := range text { |
||||
|
if !((ch >= 'a' && ch <= 'z') || |
||||
|
(ch >= 'A' && ch <= 'Z') || |
||||
|
(ch >= '0' && ch <= '9') || |
||||
|
ch == '-' || ch == '.') { |
||||
|
return true |
||||
|
} |
||||
|
} |
||||
|
return false |
||||
|
} |
||||
|
|
||||
|
func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) { |
||||
|
|
||||
|
b.WriteString(key) |
||||
|
b.WriteByte('=') |
||||
|
|
||||
|
switch value := value.(type) { |
||||
|
case string: |
||||
|
if !needsQuoting(value) { |
||||
|
b.WriteString(value) |
||||
|
} else { |
||||
|
fmt.Fprintf(b, "%q", value) |
||||
|
} |
||||
|
case error: |
||||
|
errmsg := value.Error() |
||||
|
if !needsQuoting(errmsg) { |
||||
|
b.WriteString(errmsg) |
||||
|
} else { |
||||
|
fmt.Fprintf(b, "%q", value) |
||||
|
} |
||||
|
default: |
||||
|
fmt.Fprint(b, value) |
||||
|
} |
||||
|
|
||||
|
b.WriteByte(' ') |
||||
|
} |
@ -0,0 +1,61 @@ |
|||||
|
package logrus |
||||
|
|
||||
|
import ( |
||||
|
"bytes" |
||||
|
"errors" |
||||
|
"testing" |
||||
|
"time" |
||||
|
) |
||||
|
|
||||
|
func TestQuoting(t *testing.T) { |
||||
|
tf := &TextFormatter{DisableColors: true} |
||||
|
|
||||
|
checkQuoting := func(q bool, value interface{}) { |
||||
|
b, _ := tf.Format(WithField("test", value)) |
||||
|
idx := bytes.Index(b, ([]byte)("test=")) |
||||
|
cont := bytes.Contains(b[idx+5:], []byte{'"'}) |
||||
|
if cont != q { |
||||
|
if q { |
||||
|
t.Errorf("quoting expected for: %#v", value) |
||||
|
} else { |
||||
|
t.Errorf("quoting not expected for: %#v", value) |
||||
|
} |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
checkQuoting(false, "abcd") |
||||
|
checkQuoting(false, "v1.0") |
||||
|
checkQuoting(false, "1234567890") |
||||
|
checkQuoting(true, "/foobar") |
||||
|
checkQuoting(true, "x y") |
||||
|
checkQuoting(true, "x,y") |
||||
|
checkQuoting(false, errors.New("invalid")) |
||||
|
checkQuoting(true, errors.New("invalid argument")) |
||||
|
} |
||||
|
|
||||
|
func TestTimestampFormat(t *testing.T) { |
||||
|
checkTimeStr := func(format string) { |
||||
|
customFormatter := &TextFormatter{DisableColors: true, TimestampFormat: format} |
||||
|
customStr, _ := customFormatter.Format(WithField("test", "test")) |
||||
|
timeStart := bytes.Index(customStr, ([]byte)("time=")) |
||||
|
timeEnd := bytes.Index(customStr, ([]byte)("level=")) |
||||
|
timeStr := customStr[timeStart+5 : timeEnd-1] |
||||
|
if timeStr[0] == '"' && timeStr[len(timeStr)-1] == '"' { |
||||
|
timeStr = timeStr[1 : len(timeStr)-1] |
||||
|
} |
||||
|
if format == "" { |
||||
|
format = time.RFC3339 |
||||
|
} |
||||
|
_, e := time.Parse(format, (string)(timeStr)) |
||||
|
if e != nil { |
||||
|
t.Errorf("time string \"%s\" did not match provided time format \"%s\": %s", timeStr, format, e) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
checkTimeStr("2006-01-02T15:04:05.000000000Z07:00") |
||||
|
checkTimeStr("Mon Jan _2 15:04:05 2006") |
||||
|
checkTimeStr("") |
||||
|
} |
||||
|
|
||||
|
// TODO add tests for sorting etc., this requires a parser for the text
|
||||
|
// formatter output.
|
@ -0,0 +1,53 @@ |
|||||
|
package logrus |
||||
|
|
||||
|
import ( |
||||
|
"bufio" |
||||
|
"io" |
||||
|
"runtime" |
||||
|
) |
||||
|
|
||||
|
func (logger *Logger) Writer() *io.PipeWriter { |
||||
|
return logger.WriterLevel(InfoLevel) |
||||
|
} |
||||
|
|
||||
|
func (logger *Logger) WriterLevel(level Level) *io.PipeWriter { |
||||
|
reader, writer := io.Pipe() |
||||
|
|
||||
|
var printFunc func(args ...interface{}) |
||||
|
switch level { |
||||
|
case DebugLevel: |
||||
|
printFunc = logger.Debug |
||||
|
case InfoLevel: |
||||
|
printFunc = logger.Info |
||||
|
case WarnLevel: |
||||
|
printFunc = logger.Warn |
||||
|
case ErrorLevel: |
||||
|
printFunc = logger.Error |
||||
|
case FatalLevel: |
||||
|
printFunc = logger.Fatal |
||||
|
case PanicLevel: |
||||
|
printFunc = logger.Panic |
||||
|
default: |
||||
|
printFunc = logger.Print |
||||
|
} |
||||
|
|
||||
|
go logger.writerScanner(reader, printFunc) |
||||
|
runtime.SetFinalizer(writer, writerFinalizer) |
||||
|
|
||||
|
return writer |
||||
|
} |
||||
|
|
||||
|
func (logger *Logger) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) { |
||||
|
scanner := bufio.NewScanner(reader) |
||||
|
for scanner.Scan() { |
||||
|
printFunc(scanner.Text()) |
||||
|
} |
||||
|
if err := scanner.Err(); err != nil { |
||||
|
logger.Errorf("Error while reading from Writer: %s", err) |
||||
|
} |
||||
|
reader.Close() |
||||
|
} |
||||
|
|
||||
|
func writerFinalizer(writer *io.PipeWriter) { |
||||
|
writer.Close() |
||||
|
} |
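// logCommandOutput is an illustrative sketch added for this write-up (not
// part of the upstream file): it shows the intended use of WriterLevel,
// handing the pipe writer to code that only understands io.Writer so that
// every line it writes becomes a separate log entry at the chosen level.
func logCommandOutput(logger *Logger, level Level, run func(w io.Writer) error) error {
	w := logger.WriterLevel(level)
	// Closing the writer stops the scanner goroutine started by WriterLevel;
	// the finalizer set there is only a safety net.
	defer w.Close()
	return run(w)
}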
@ -0,0 +1,47 @@ |
|||||
|
# go-shellwords |
||||
|
|
||||
|
[![Coverage Status](https://coveralls.io/repos/mattn/go-shellwords/badge.png?branch=master)](https://coveralls.io/r/mattn/go-shellwords?branch=master) |
||||
|
[![Build Status](https://travis-ci.org/mattn/go-shellwords.svg?branch=master)](https://travis-ci.org/mattn/go-shellwords) |
||||
|
|
||||
|
Parse a line as shell words.
||||
|
|
||||
|
## Usage |
||||
|
|
||||
|
```go |
||||
|
args, err := shellwords.Parse("./foo --bar=baz") |
||||
|
// args should be ["./foo", "--bar=baz"] |
||||
|
``` |
||||
|
|
||||
|
```go |
||||
|
os.Setenv("FOO", "bar") |
||||
|
p := shellwords.NewParser() |
||||
|
p.ParseEnv = true |
||||
|
args, err := p.Parse("./foo $FOO") |
||||
|
// args should be ["./foo", "bar"] |
||||
|
``` |
||||
|
|
||||
|
```go |
||||
|
p := shellwords.NewParser() |
||||
|
p.ParseBacktick = true |
||||
|
args, err := p.Parse("./foo `echo $SHELL`") |
||||
|
// args should be ["./foo", "/bin/bash"] |
||||
|
``` |
||||
|
|
||||
|
```go |
||||
|
shellwords.ParseBacktick = true |
||||
|
p := shellwords.NewParser() |
||||
|
args, err := p.Parse("./foo `echo $SHELL`") |
||||
|
// args should be ["./foo", "/bin/bash"] |
||||
|
``` |
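
Putting the pieces together, a complete program might look like the sketch below (illustrative only; the command line and the use of `$HOME` are made up):

```go
package main

import (
	"fmt"
	"log"

	shellwords "github.com/mattn/go-shellwords"
)

func main() {
	p := shellwords.NewParser()
	p.ParseEnv = true // expand $VAR and ${VAR} before splitting

	args, err := p.Parse(`./foo --bar="baz qux" $HOME`)
	if err != nil {
		log.Fatal(err)
	}
	for i, a := range args {
		fmt.Printf("args[%d] = %q\n", i, a)
	}
}
```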
||||
|
|
||||
|
# Thanks |
||||
|
|
||||
|
This is based on cpan module [Parse::CommandLine](https://metacpan.org/pod/Parse::CommandLine). |
||||
|
|
||||
|
# License |
||||
|
|
||||
|
Released under the MIT License: http://mattn.mit-license.org/2014
||||
|
|
||||
|
# Author |
||||
|
|
||||
|
Yasuhiro Matsumoto (a.k.a mattn) |
@ -0,0 +1,134 @@ |
|||||
|
package shellwords |
||||
|
|
||||
|
import ( |
||||
|
"errors" |
||||
|
"os" |
||||
|
"regexp" |
||||
|
"strings" |
||||
|
) |
||||
|
|
||||
|
var ( |
||||
|
ParseEnv bool = false |
||||
|
ParseBacktick bool = false |
||||
|
) |
||||
|
|
||||
|
var envRe = regexp.MustCompile(`\$({[a-zA-Z0-9_]+}|[a-zA-Z0-9_]+)`) |
||||
|
|
||||
|
func isSpace(r rune) bool { |
||||
|
switch r { |
||||
|
case ' ', '\t', '\r', '\n': |
||||
|
return true |
||||
|
} |
||||
|
return false |
||||
|
} |
||||
|
|
||||
|
func replaceEnv(s string) string { |
||||
|
return envRe.ReplaceAllStringFunc(s, func(s string) string { |
||||
|
s = s[1:] |
||||
|
if s[0] == '{' { |
||||
|
s = s[1 : len(s)-1] |
||||
|
} |
||||
|
return os.Getenv(s) |
||||
|
}) |
||||
|
} |
||||
|
|
||||
|
type Parser struct { |
||||
|
ParseEnv bool |
||||
|
ParseBacktick bool |
||||
|
} |
||||
|
|
||||
|
func NewParser() *Parser { |
||||
|
return &Parser{ParseEnv, ParseBacktick} |
||||
|
} |
||||
|
|
||||
|
func (p *Parser) Parse(line string) ([]string, error) { |
||||
|
line = strings.TrimSpace(line) |
||||
|
|
||||
|
args := []string{} |
||||
|
buf := "" |
||||
|
var escaped, doubleQuoted, singleQuoted, backQuote bool |
||||
|
backtick := "" |
||||
|
|
||||
|
for _, r := range line { |
||||
|
if escaped { |
||||
|
buf += string(r) |
||||
|
escaped = false |
||||
|
continue |
||||
|
} |
||||
|
|
||||
|
if r == '\\' { |
||||
|
if singleQuoted { |
||||
|
buf += string(r) |
||||
|
} else { |
||||
|
escaped = true |
||||
|
} |
||||
|
continue |
||||
|
} |
||||
|
|
||||
|
if isSpace(r) { |
||||
|
if singleQuoted || doubleQuoted || backQuote { |
||||
|
buf += string(r) |
||||
|
backtick += string(r) |
||||
|
} else if buf != "" { |
||||
|
if p.ParseEnv { |
||||
|
buf = replaceEnv(buf) |
||||
|
} |
||||
|
args = append(args, buf) |
||||
|
buf = "" |
||||
|
} |
||||
|
continue |
||||
|
} |
||||
|
|
||||
|
switch r { |
||||
|
case '`': |
||||
|
if !singleQuoted && !doubleQuoted { |
||||
|
if p.ParseBacktick { |
||||
|
if backQuote { |
||||
|
out, err := shellRun(backtick) |
||||
|
if err != nil { |
||||
|
return nil, err |
||||
|
} |
||||
|
buf = out |
||||
|
} |
||||
|
backtick = "" |
||||
|
backQuote = !backQuote |
||||
|
continue |
||||
|
} |
||||
|
backtick = "" |
||||
|
backQuote = !backQuote |
||||
|
} |
||||
|
case '"': |
||||
|
if !singleQuoted { |
||||
|
doubleQuoted = !doubleQuoted |
||||
|
continue |
||||
|
} |
||||
|
case '\'': |
||||
|
if !doubleQuoted { |
||||
|
singleQuoted = !singleQuoted |
||||
|
continue |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
buf += string(r) |
||||
|
if backQuote { |
||||
|
backtick += string(r) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
if buf != "" { |
||||
|
if p.ParseEnv { |
||||
|
buf = replaceEnv(buf) |
||||
|
} |
||||
|
args = append(args, buf) |
||||
|
} |
||||
|
|
||||
|
if escaped || singleQuoted || doubleQuoted || backQuote { |
||||
|
return nil, errors.New("invalid command line string") |
||||
|
} |
||||
|
|
||||
|
return args, nil |
||||
|
} |
||||
|
|
||||
|
func Parse(line string) ([]string, error) { |
||||
|
return NewParser().Parse(line) |
||||
|
} |
@ -0,0 +1,132 @@ |
|||||
|
package shellwords |
||||
|
|
||||
|
import ( |
||||
|
"os" |
||||
|
"reflect" |
||||
|
"testing" |
||||
|
) |
||||
|
|
||||
|
var testcases = []struct { |
||||
|
line string |
||||
|
expected []string |
||||
|
}{ |
||||
|
{`var --bar=baz`, []string{`var`, `--bar=baz`}}, |
||||
|
{`var --bar="baz"`, []string{`var`, `--bar=baz`}}, |
||||
|
{`var "--bar=baz"`, []string{`var`, `--bar=baz`}}, |
||||
|
{`var "--bar='baz'"`, []string{`var`, `--bar='baz'`}}, |
||||
|
{"var --bar=`baz`", []string{`var`, "--bar=`baz`"}}, |
||||
|
{`var "--bar=\"baz'"`, []string{`var`, `--bar="baz'`}}, |
||||
|
{`var "--bar=\'baz\'"`, []string{`var`, `--bar='baz'`}}, |
||||
|
{`var --bar='\'`, []string{`var`, `--bar=\`}}, |
||||
|
{`var "--bar baz"`, []string{`var`, `--bar baz`}}, |
||||
|
{`var --"bar baz"`, []string{`var`, `--bar baz`}}, |
||||
|
{`var --"bar baz"`, []string{`var`, `--bar baz`}}, |
||||
|
} |
||||
|
|
||||
|
func TestSimple(t *testing.T) { |
||||
|
for _, testcase := range testcases { |
||||
|
args, err := Parse(testcase.line) |
||||
|
if err != nil { |
||||
|
t.Fatalf(err.Error()) |
||||
|
} |
||||
|
if !reflect.DeepEqual(args, testcase.expected) { |
||||
|
t.Fatalf("Expected %v, but %v:", testcase.expected, args) |
||||
|
} |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func TestError(t *testing.T) { |
||||
|
_, err := Parse("foo '") |
||||
|
if err == nil { |
||||
|
t.Fatalf("Should be an error") |
||||
|
} |
||||
|
_, err = Parse(`foo "`) |
||||
|
if err == nil { |
||||
|
t.Fatalf("Should be an error") |
||||
|
} |
||||
|
|
||||
|
_, err = Parse("foo `") |
||||
|
if err == nil { |
||||
|
t.Fatalf("Should be an error") |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func TestBacktick(t *testing.T) { |
||||
|
goversion, err := shellRun("go version") |
||||
|
if err != nil { |
||||
|
t.Fatalf(err.Error()) |
||||
|
} |
||||
|
|
||||
|
parser := NewParser() |
||||
|
parser.ParseBacktick = true |
||||
|
args, err := parser.Parse("echo `go version`") |
||||
|
if err != nil { |
||||
|
t.Fatalf(err.Error()) |
||||
|
} |
||||
|
expected := []string{"echo", goversion} |
||||
|
if !reflect.DeepEqual(args, expected) { |
||||
|
t.Fatalf("Expected %v, but %v:", expected, args) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func TestBacktickError(t *testing.T) { |
||||
|
parser := NewParser() |
||||
|
parser.ParseBacktick = true |
||||
|
_, err := parser.Parse("echo `go Version`") |
||||
|
if err == nil { |
||||
|
t.Fatalf("Should be an error") |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func TestEnv(t *testing.T) { |
||||
|
os.Setenv("FOO", "bar") |
||||
|
|
||||
|
parser := NewParser() |
||||
|
parser.ParseEnv = true |
||||
|
args, err := parser.Parse("echo $FOO") |
||||
|
if err != nil { |
||||
|
t.Fatalf(err.Error()) |
||||
|
} |
||||
|
expected := []string{"echo", "bar"} |
||||
|
if !reflect.DeepEqual(args, expected) { |
||||
|
t.Fatalf("Expected %v, but %v:", expected, args) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func TestNoEnv(t *testing.T) { |
||||
|
parser := NewParser() |
||||
|
parser.ParseEnv = true |
||||
|
args, err := parser.Parse("echo $BAR") |
||||
|
if err != nil { |
||||
|
t.Fatalf(err.Error()) |
||||
|
} |
||||
|
expected := []string{"echo", ""} |
||||
|
if !reflect.DeepEqual(args, expected) { |
||||
|
t.Fatalf("Expected %v, but %v:", expected, args) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func TestDupEnv(t *testing.T) { |
||||
|
os.Setenv("FOO", "bar") |
||||
|
os.Setenv("FOO_BAR", "baz") |
||||
|
|
||||
|
parser := NewParser() |
||||
|
parser.ParseEnv = true |
||||
|
args, err := parser.Parse("echo $$FOO$") |
||||
|
if err != nil { |
||||
|
t.Fatalf(err.Error()) |
||||
|
} |
||||
|
expected := []string{"echo", "$bar$"} |
||||
|
if !reflect.DeepEqual(args, expected) { |
||||
|
t.Fatalf("Expected %v, but %v:", expected, args) |
||||
|
} |
||||
|
|
||||
|
args, err = parser.Parse("echo $${FOO_BAR}$") |
||||
|
if err != nil { |
||||
|
t.Fatalf(err.Error()) |
||||
|
} |
||||
|
expected = []string{"echo", "$baz$"} |
||||
|
if !reflect.DeepEqual(args, expected) { |
||||
|
t.Fatalf("Expected %v, but %v:", expected, args) |
||||
|
} |
||||
|
} |
@ -0,0 +1,19 @@ |
|||||
|
// +build !windows
|
||||
|
|
||||
|
package shellwords |
||||
|
|
||||
|
import ( |
||||
|
"errors" |
||||
|
"os" |
||||
|
"os/exec" |
||||
|
"strings" |
||||
|
) |
||||
|
|
||||
|
func shellRun(line string) (string, error) { |
||||
|
shell := os.Getenv("SHELL") |
||||
|
b, err := exec.Command(shell, "-c", line).Output() |
||||
|
if err != nil { |
||||
|
return "", errors.New(err.Error() + ":" + string(b)) |
||||
|
} |
||||
|
return strings.TrimSpace(string(b)), nil |
||||
|
} |
@ -0,0 +1,17 @@ |
|||||
|
package shellwords |
||||
|
|
||||
|
import ( |
||||
|
"errors" |
||||
|
"os" |
||||
|
"os/exec" |
||||
|
"strings" |
||||
|
) |
||||
|
|
||||
|
func shellRun(line string) (string, error) { |
||||
|
shell := os.Getenv("COMSPEC") |
||||
|
b, err := exec.Command(shell, "/c", line).Output() |
||||
|
if err != nil { |
||||
|
return "", errors.New(err.Error() + ":" + string(b)) |
||||
|
} |
||||
|
return strings.TrimSpace(string(b)), nil |
||||
|
} |
@ -0,0 +1,21 @@ |
|||||
|
The MIT License (MIT) |
||||
|
|
||||
|
Copyright (c) 2014 Yasuhiro Matsumoto |
||||
|
|
||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy |
||||
|
of this software and associated documentation files (the "Software"), to deal |
||||
|
in the Software without restriction, including without limitation the rights |
||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell |
||||
|
copies of the Software, and to permit persons to whom the Software is |
||||
|
furnished to do so, subject to the following conditions: |
||||
|
|
||||
|
The above copyright notice and this permission notice shall be included in all |
||||
|
copies or substantial portions of the Software. |
||||
|
|
||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, |
||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
||||
|
SOFTWARE. |
@ -0,0 +1,81 @@ |
|||||
|
go-sqlite3 |
||||
|
========== |
||||
|
|
||||
|
[![Build Status](https://travis-ci.org/mattn/go-sqlite3.svg?branch=master)](https://travis-ci.org/mattn/go-sqlite3) |
||||
|
[![Coverage Status](https://coveralls.io/repos/mattn/go-sqlite3/badge.svg?branch=master)](https://coveralls.io/r/mattn/go-sqlite3?branch=master) |
||||
|
[![GoDoc](https://godoc.org/github.com/mattn/go-sqlite3?status.svg)](http://godoc.org/github.com/mattn/go-sqlite3) |
||||
|
|
||||
|
Description |
||||
|
----------- |
||||
|
|
||||
|
sqlite3 driver conforming to the built-in database/sql interface |
||||
|
|
||||
|
Installation |
||||
|
------------ |
||||
|
|
||||
|
This package can be installed with the go get command: |
||||
|
|
||||
|
go get github.com/mattn/go-sqlite3 |
||||
|
|
||||
|
_go-sqlite3_ is a *cgo* package.
||||
|
If you want to build your app using go-sqlite3, you need gcc. |
||||
|
However, if you install _go-sqlite3_ with `go install github.com/mattn/go-sqlite3`, you don't need gcc to build your app anymore. |
||||
|
|
||||
|
Documentation |
||||
|
------------- |
||||
|
|
||||
|
API documentation can be found here: http://godoc.org/github.com/mattn/go-sqlite3 |
||||
|
|
||||
|
Examples can be found under the `./_example` directory |
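
A minimal end-to-end sketch (based on the `simple` example included below; the database file name is illustrative):

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/mattn/go-sqlite3" // registers the "sqlite3" driver
)

func main() {
	db, err := sql.Open("sqlite3", "./foo.db")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if _, err := db.Exec(`create table if not exists foo (id integer primary key, name text)`); err != nil {
		log.Fatal(err)
	}
	if _, err := db.Exec(`insert into foo(name) values (?)`, "hello"); err != nil {
		log.Fatal(err)
	}

	var name string
	if err := db.QueryRow(`select name from foo where id = ?`, 1).Scan(&name); err != nil {
		log.Fatal(err)
	}
	fmt.Println(name)
}
```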
||||
|
|
||||
|
FAQ |
||||
|
--- |
||||
|
|
||||
|
* Want to build go-sqlite3 with libsqlite3 on Linux.
||||
|
|
||||
|
Use `go build --tags "libsqlite3 linux"` |
||||
|
|
||||
|
* Want to build go-sqlite3 with libsqlite3 on OS X. |
||||
|
|
||||
|
Install sqlite3 from homebrew: `brew install sqlite3` |
||||
|
|
||||
|
Use `go build --tags "libsqlite3 darwin"` |
||||
|
|
||||
|
* Want to build go-sqlite3 with icu extension. |
||||
|
|
||||
|
Use `go build --tags "icu"` |
||||
|
|
||||
|
* Can't build go-sqlite3 on windows 64bit. |
||||
|
|
||||
|
> You are probably using Go 1.0; Go 1.0 has a problem when it comes to compiling/linking on Windows 64-bit.
||||
|
> See: https://github.com/mattn/go-sqlite3/issues/27 |
||||
|
|
||||
|
* Getting insert error while query is opened. |
||||
|
|
||||
|
> You can pass some arguments into the connection string, for example, a URI. |
||||
|
> See: https://github.com/mattn/go-sqlite3/issues/39 |
||||
|
|
||||
|
* Do you want to cross compile? mingw on Linux or Mac? |
||||
|
|
||||
|
> See: https://github.com/mattn/go-sqlite3/issues/106 |
||||
|
> See also: http://www.limitlessfx.com/cross-compile-golang-app-for-windows-from-linux.html |
||||
|
|
||||
|
* Want to get time.Time with current locale |
||||
|
|
||||
|
Use `loc=auto` in the SQLite3 filename URI, e.g. `file:foo.db?loc=auto`.
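
For example (a sketch; the path is illustrative):

```go
// Timestamps scanned into time.Time are returned in the local time zone.
db, err := sql.Open("sqlite3", "file:foo.db?loc=auto")
```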
||||
|
|
||||
|
License |
||||
|
------- |
||||
|
|
||||
|
MIT: http://mattn.mit-license.org/2012 |
||||
|
|
||||
|
sqlite3-binding.c, sqlite3-binding.h, sqlite3ext.h |
||||
|
|
||||
|
The -binding suffix was added to avoid build failures under gccgo. |
||||
|
|
||||
|
In this repository, those files are an amalgamation of code that was copied from SQLite3. The license of that code is the same as the license of SQLite3. |
||||
|
|
||||
|
Author |
||||
|
------ |
||||
|
|
||||
|
Yasuhiro Matsumoto (a.k.a mattn) |
@ -0,0 +1,133 @@ |
|||||
|
package main |
||||
|
|
||||
|
import ( |
||||
|
"database/sql" |
||||
|
"fmt" |
||||
|
"log" |
||||
|
"math" |
||||
|
"math/rand" |
||||
|
|
||||
|
sqlite "github.com/mattn/go-sqlite3" |
||||
|
) |
||||
|
|
||||
|
// Computes x^y
|
||||
|
func pow(x, y int64) int64 { |
||||
|
return int64(math.Pow(float64(x), float64(y))) |
||||
|
} |
||||
|
|
||||
|
// Computes the bitwise exclusive-or of all its arguments
|
||||
|
func xor(xs ...int64) int64 { |
||||
|
var ret int64 |
||||
|
for _, x := range xs { |
||||
|
ret ^= x |
||||
|
} |
||||
|
return ret |
||||
|
} |
||||
|
|
||||
|
// Returns a random number. It's actually deterministic here because
|
||||
|
// we don't seed the RNG, but it's an example of a non-pure function
|
||||
|
// from SQLite's POV.
|
||||
|
func getrand() int64 { |
||||
|
return rand.Int63() |
||||
|
} |
||||
|
|
||||
|
// Computes the standard deviation of a GROUPed BY set of values
|
||||
|
type stddev struct { |
||||
|
xs []int64 |
||||
|
// Running average calculation
|
||||
|
sum int64 |
||||
|
n int64 |
||||
|
} |
||||
|
|
||||
|
func newStddev() *stddev { return &stddev{} } |
||||
|
|
||||
|
func (s *stddev) Step(x int64) { |
||||
|
s.xs = append(s.xs, x) |
||||
|
s.sum += x |
||||
|
s.n++ |
||||
|
} |
||||
|
|
||||
|
func (s *stddev) Done() float64 { |
||||
|
mean := float64(s.sum) / float64(s.n) |
||||
|
var sqDiff []float64 |
||||
|
for _, x := range s.xs { |
||||
|
sqDiff = append(sqDiff, math.Pow(float64(x)-mean, 2)) |
||||
|
} |
||||
|
var dev float64 |
||||
|
for _, x := range sqDiff { |
||||
|
dev += x |
||||
|
} |
||||
|
dev /= float64(len(sqDiff)) |
||||
|
return math.Sqrt(dev) |
||||
|
} |
||||
|
|
||||
|
func main() { |
||||
|
sql.Register("sqlite3_custom", &sqlite.SQLiteDriver{ |
||||
|
ConnectHook: func(conn *sqlite.SQLiteConn) error { |
||||
|
if err := conn.RegisterFunc("pow", pow, true); err != nil { |
||||
|
return err |
||||
|
} |
||||
|
if err := conn.RegisterFunc("xor", xor, true); err != nil { |
||||
|
return err |
||||
|
} |
||||
|
if err := conn.RegisterFunc("rand", getrand, false); err != nil { |
||||
|
return err |
||||
|
} |
||||
|
if err := conn.RegisterAggregator("stddev", newStddev, true); err != nil { |
||||
|
return err |
||||
|
} |
||||
|
return nil |
||||
|
}, |
||||
|
}) |
||||
|
|
||||
|
db, err := sql.Open("sqlite3_custom", ":memory:") |
||||
|
if err != nil { |
||||
|
log.Fatal("Failed to open database:", err) |
||||
|
} |
||||
|
defer db.Close() |
||||
|
|
||||
|
var i int64 |
||||
|
err = db.QueryRow("SELECT pow(2,3)").Scan(&i) |
||||
|
if err != nil { |
||||
|
log.Fatal("POW query error:", err) |
||||
|
} |
||||
|
fmt.Println("pow(2,3) =", i) // 8
|
||||
|
|
||||
|
err = db.QueryRow("SELECT xor(1,2,3,4,5,6)").Scan(&i) |
||||
|
if err != nil { |
||||
|
log.Fatal("XOR query error:", err) |
||||
|
} |
||||
|
fmt.Println("xor(1,2,3,4,5) =", i) // 7
|
||||
|
|
||||
|
err = db.QueryRow("SELECT rand()").Scan(&i) |
||||
|
if err != nil { |
||||
|
log.Fatal("RAND query error:", err) |
||||
|
} |
||||
|
fmt.Println("rand() =", i) // pseudorandom
|
||||
|
|
||||
|
_, err = db.Exec("create table foo (department integer, profits integer)") |
||||
|
if err != nil { |
||||
|
log.Fatal("Failed to create table:", err) |
||||
|
} |
||||
|
_, err = db.Exec("insert into foo values (1, 10), (1, 20), (1, 45), (2, 42), (2, 115)") |
||||
|
if err != nil { |
||||
|
log.Fatal("Failed to insert records:", err) |
||||
|
} |
||||
|
|
||||
|
rows, err := db.Query("select department, stddev(profits) from foo group by department") |
||||
|
if err != nil { |
||||
|
log.Fatal("STDDEV query error:", err) |
||||
|
} |
||||
|
defer rows.Close() |
||||
|
for rows.Next() { |
||||
|
var dept int64 |
||||
|
var dev float64 |
||||
|
if err := rows.Scan(&dept, &dev); err != nil { |
||||
|
log.Fatal(err) |
||||
|
} |
||||
|
fmt.Printf("dept=%d stddev=%f\n", dept, dev) |
||||
|
} |
||||
|
if err := rows.Err(); err != nil { |
||||
|
log.Fatal(err) |
||||
|
} |
||||
|
} |
@ -0,0 +1,71 @@ |
|||||
|
package main |
||||
|
|
||||
|
import ( |
||||
|
"database/sql" |
||||
|
"github.com/mattn/go-sqlite3" |
||||
|
"log" |
||||
|
"os" |
||||
|
) |
||||
|
|
||||
|
func main() { |
||||
|
sqlite3conn := []*sqlite3.SQLiteConn{} |
||||
|
sql.Register("sqlite3_with_hook_example", |
||||
|
&sqlite3.SQLiteDriver{ |
||||
|
ConnectHook: func(conn *sqlite3.SQLiteConn) error { |
||||
|
sqlite3conn = append(sqlite3conn, conn) |
||||
|
return nil |
||||
|
}, |
||||
|
}) |
||||
|
os.Remove("./foo.db") |
||||
|
os.Remove("./bar.db") |
||||
|
|
||||
|
destDb, err := sql.Open("sqlite3_with_hook_example", "./foo.db") |
||||
|
if err != nil { |
||||
|
log.Fatal(err) |
||||
|
} |
||||
|
defer destDb.Close() |
||||
|
destDb.Ping() |
||||
|
|
||||
|
_, err = destDb.Exec("create table foo(id int, value text)") |
||||
|
if err != nil { |
||||
|
log.Fatal(err) |
||||
|
} |
||||
|
_, err = destDb.Exec("insert into foo values(1, 'foo')") |
||||
|
if err != nil { |
||||
|
log.Fatal(err) |
||||
|
} |
||||
|
_, err = destDb.Exec("insert into foo values(2, 'bar')") |
||||
|
if err != nil { |
||||
|
log.Fatal(err) |
||||
|
} |
||||
|
_, err = destDb.Query("select * from foo") |
||||
|
if err != nil { |
||||
|
log.Fatal(err) |
||||
|
} |
||||
|
srcDb, err := sql.Open("sqlite3_with_hook_example", "./bar.db") |
||||
|
if err != nil { |
||||
|
log.Fatal(err) |
||||
|
} |
||||
|
defer srcDb.Close() |
||||
|
srcDb.Ping() |
||||
|
|
||||
|
bk, err := sqlite3conn[1].Backup("main", sqlite3conn[0], "main") |
||||
|
if err != nil { |
||||
|
log.Fatal(err) |
||||
|
} |
||||
|
|
||||
|
_, err = bk.Step(-1) |
||||
|
if err != nil { |
||||
|
log.Fatal(err) |
||||
|
} |
||||
|
_, err = destDb.Query("select * from foo") |
||||
|
if err != nil { |
||||
|
log.Fatal(err) |
||||
|
} |
||||
|
_, err = destDb.Exec("insert into foo values(3, 'bar')") |
||||
|
if err != nil { |
||||
|
log.Fatal(err) |
||||
|
} |
||||
|
|
||||
|
bk.Finish() |
||||
|
} |
@ -0,0 +1,22 @@ |
|||||
|
ifeq ($(OS),Windows_NT) |
||||
|
EXE=extension.exe |
||||
|
EXT=sqlite3_mod_regexp.dll |
||||
|
RM=cmd /c del |
||||
|
LDFLAG= |
||||
|
else |
||||
|
EXE=extension |
||||
|
EXT=sqlite3_mod_regexp.so |
||||
|
RM=rm |
||||
|
LDFLAG=-fPIC |
||||
|
endif |
||||
|
|
||||
|
all : $(EXE) $(EXT) |
||||
|
|
||||
|
$(EXE) : extension.go |
||||
|
go build $< |
||||
|
|
||||
|
$(EXT) : sqlite3_mod_regexp.c |
||||
|
gcc $(LDFLAG) -shared -o $@ $< -lsqlite3 -lpcre |
||||
|
|
||||
|
clean : |
||||
|
@-$(RM) $(EXE) $(EXT) |
@ -0,0 +1,43 @@ |
|||||
|
package main |
||||
|
|
||||
|
import ( |
||||
|
"database/sql" |
||||
|
"fmt" |
||||
|
"github.com/mattn/go-sqlite3" |
||||
|
"log" |
||||
|
) |
||||
|
|
||||
|
func main() { |
||||
|
sql.Register("sqlite3_with_extensions", |
||||
|
&sqlite3.SQLiteDriver{ |
||||
|
Extensions: []string{ |
||||
|
"sqlite3_mod_regexp", |
||||
|
}, |
||||
|
}) |
||||
|
|
||||
|
db, err := sql.Open("sqlite3_with_extensions", ":memory:") |
||||
|
if err != nil { |
||||
|
log.Fatal(err) |
||||
|
} |
||||
|
defer db.Close() |
||||
|
|
||||
|
// Force db to make a new connection in pool
|
||||
|
// by putting the original in a transaction
|
||||
|
tx, err := db.Begin() |
||||
|
if err != nil { |
||||
|
log.Fatal(err) |
||||
|
} |
||||
|
defer tx.Commit() |
||||
|
|
||||
|
// New connection works (hopefully!)
|
||||
|
rows, err := db.Query("select 'hello world' where 'hello world' regexp '^hello.*d$'") |
||||
|
if err != nil { |
||||
|
log.Fatal(err) |
||||
|
} |
||||
|
defer rows.Close() |
||||
|
for rows.Next() { |
||||
|
var helloworld string |
||||
|
rows.Scan(&helloworld) |
||||
|
fmt.Println(helloworld) |
||||
|
} |
||||
|
} |
@ -0,0 +1,31 @@ |
|||||
|
#include <pcre.h> |
||||
|
#include <string.h> |
||||
|
#include <stdio.h> |
||||
|
#include <sqlite3ext.h> |
||||
|
|
||||
|
SQLITE_EXTENSION_INIT1 |
||||
|
static void regexp_func(sqlite3_context *context, int argc, sqlite3_value **argv) { |
||||
|
if (argc >= 2) { |
||||
|
const char *target = (const char *)sqlite3_value_text(argv[1]); |
||||
|
const char *pattern = (const char *)sqlite3_value_text(argv[0]); |
||||
|
const char* errstr = NULL; |
||||
|
int erroff = 0; |
||||
|
int vec[500]; |
||||
|
int n, rc; |
||||
|
pcre* re = pcre_compile(pattern, 0, &errstr, &erroff, NULL); |
||||
|
rc = pcre_exec(re, NULL, target, strlen(target), 0, 0, vec, 500); |
||||
|
if (rc <= 0) { |
||||
|
sqlite3_result_error(context, errstr, 0); |
||||
|
return; |
||||
|
} |
||||
|
sqlite3_result_int(context, 1); |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
#ifdef _WIN32 |
||||
|
__declspec(dllexport) |
||||
|
#endif |
||||
|
int sqlite3_extension_init(sqlite3 *db, char **errmsg, const sqlite3_api_routines *api) { |
||||
|
SQLITE_EXTENSION_INIT2(api); |
||||
|
return sqlite3_create_function(db, "regexp", 2, SQLITE_UTF8, (void*)db, regexp_func, NULL, NULL); |
||||
|
} |
@ -0,0 +1,24 @@ |
|||||
|
ifeq ($(OS),Windows_NT) |
||||
|
EXE=extension.exe |
||||
|
EXT=sqlite3_mod_vtable.dll |
||||
|
RM=cmd /c del |
||||
|
LIBCURL=-lcurldll |
||||
|
LDFLAG= |
||||
|
else |
||||
|
EXE=extension |
||||
|
EXT=sqlite3_mod_vtable.so |
||||
|
RM=rm |
||||
|
LDFLAG=-fPIC |
||||
|
LIBCURL=-lcurl |
||||
|
endif |
||||
|
|
||||
|
all : $(EXE) $(EXT) |
||||
|
|
||||
|
$(EXE) : extension.go |
||||
|
go build $< |
||||
|
|
||||
|
$(EXT) : sqlite3_mod_vtable.cc |
||||
|
g++ $(LDFLAG) -shared -o $@ $< -lsqlite3 $(LIBCURL) |
||||
|
|
||||
|
clean : |
||||
|
@-$(RM) $(EXE) $(EXT) |
@ -0,0 +1,36 @@ |
|||||
|
package main |
||||
|
|
||||
|
import ( |
||||
|
"database/sql" |
||||
|
"fmt" |
||||
|
"github.com/mattn/go-sqlite3" |
||||
|
"log" |
||||
|
) |
||||
|
|
||||
|
func main() { |
||||
|
sql.Register("sqlite3_with_extensions", |
||||
|
&sqlite3.SQLiteDriver{ |
||||
|
Extensions: []string{ |
||||
|
"sqlite3_mod_vtable", |
||||
|
}, |
||||
|
}) |
||||
|
|
||||
|
db, err := sql.Open("sqlite3_with_extensions", ":memory:") |
||||
|
if err != nil { |
||||
|
log.Fatal(err) |
||||
|
} |
||||
|
defer db.Close() |
||||
|
|
||||
|
db.Exec("create virtual table repo using github(id, full_name, description, html_url)") |
||||
|
|
||||
|
rows, err := db.Query("select id, full_name, description, html_url from repo") |
||||
|
if err != nil { |
||||
|
log.Fatal(err) |
||||
|
} |
||||
|
defer rows.Close() |
||||
|
for rows.Next() { |
||||
|
var id, full_name, description, html_url string |
||||
|
rows.Scan(&id, &full_name, &description, &html_url) |
||||
|
fmt.Printf("%s: %s\n\t%s\n\t%s\n\n", id, full_name, description, html_url) |
||||
|
} |
||||
|
} |
1040
vendor/src/github.com/mattn/go-sqlite3/_example/mod_vtable/picojson.h
File diff suppressed because it is too large
@ -0,0 +1,238 @@ |
|||||
|
#include <string>
|
||||
|
#include <sstream>
|
||||
|
#include <sqlite3-binding.h>
|
||||
|
#include <sqlite3ext.h>
|
||||
|
#include <curl/curl.h>
|
||||
|
#include "picojson.h"
|
||||
|
|
||||
|
#ifdef _WIN32
|
||||
|
# define EXPORT __declspec(dllexport)
|
||||
|
#else
|
||||
|
# define EXPORT
|
||||
|
#endif
|
||||
|
|
||||
|
SQLITE_EXTENSION_INIT1; |
||||
|
|
||||
|
typedef struct { |
||||
|
char* data; // response data from server
|
||||
|
size_t size; // response size of data
|
||||
|
} MEMFILE; |
||||
|
|
||||
|
MEMFILE* |
||||
|
memfopen() { |
||||
|
MEMFILE* mf = (MEMFILE*) malloc(sizeof(MEMFILE)); |
||||
|
if (mf) { |
||||
|
mf->data = NULL; |
||||
|
mf->size = 0; |
||||
|
} |
||||
|
return mf; |
||||
|
} |
||||
|
|
||||
|
void |
||||
|
memfclose(MEMFILE* mf) { |
||||
|
if (mf->data) free(mf->data); |
||||
|
free(mf); |
||||
|
} |
||||
|
|
||||
|
size_t |
||||
|
memfwrite(char* ptr, size_t size, size_t nmemb, void* stream) { |
||||
|
MEMFILE* mf = (MEMFILE*) stream; |
||||
|
int block = size * nmemb; |
||||
|
if (!mf) return block; // through
|
||||
|
if (!mf->data) |
||||
|
mf->data = (char*) malloc(block); |
||||
|
else |
||||
|
mf->data = (char*) realloc(mf->data, mf->size + block); |
||||
|
if (mf->data) { |
||||
|
memcpy(mf->data + mf->size, ptr, block); |
||||
|
mf->size += block; |
||||
|
} |
||||
|
return block; |
||||
|
} |
||||
|
|
||||
|
char* |
||||
|
memfstrdup(MEMFILE* mf) { |
||||
|
char* buf; |
||||
|
if (mf->size == 0) return NULL; |
||||
|
buf = (char*) malloc(mf->size + 1); |
||||
|
memcpy(buf, mf->data, mf->size); |
||||
|
buf[mf->size] = 0; |
||||
|
return buf; |
||||
|
} |
||||
|
|
||||
|
static int |
||||
|
my_connect(sqlite3 *db, void *pAux, int argc, const char * const *argv, sqlite3_vtab **ppVTab, char **c) { |
||||
|
std::stringstream ss; |
||||
|
ss << "CREATE TABLE " << argv[0] |
||||
|
<< "(id int, full_name text, description text, html_url text)"; |
||||
|
int rc = sqlite3_declare_vtab(db, ss.str().c_str()); |
||||
|
*ppVTab = (sqlite3_vtab *) sqlite3_malloc(sizeof(sqlite3_vtab)); |
||||
|
memset(*ppVTab, 0, sizeof(sqlite3_vtab)); |
||||
|
return rc; |
||||
|
} |
||||
|
|
||||
|
static int |
||||
|
my_create(sqlite3 *db, void *pAux, int argc, const char * const * argv, sqlite3_vtab **ppVTab, char **c) { |
||||
|
return my_connect(db, pAux, argc, argv, ppVTab, c); |
||||
|
} |
||||
|
|
||||
|
static int my_disconnect(sqlite3_vtab *pVTab) { |
||||
|
sqlite3_free(pVTab); |
||||
|
return SQLITE_OK; |
||||
|
} |
||||
|
|
||||
|
static int |
||||
|
my_destroy(sqlite3_vtab *pVTab) { |
||||
|
sqlite3_free(pVTab); |
||||
|
return SQLITE_OK; |
||||
|
} |
||||
|
|
||||
|
typedef struct { |
||||
|
sqlite3_vtab_cursor base; |
||||
|
int index; |
||||
|
picojson::value* rows; |
||||
|
} cursor; |
||||
|
|
||||
|
static int |
||||
|
my_open(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor) { |
||||
|
MEMFILE* mf; |
||||
|
CURL* curl; |
||||
|
char* json; |
||||
|
CURLcode res = CURLE_OK; |
||||
|
char error[CURL_ERROR_SIZE] = {0}; |
||||
|
char* cert_file = getenv("SSL_CERT_FILE"); |
||||
|
|
||||
|
mf = memfopen(); |
||||
|
curl = curl_easy_init(); |
||||
|
curl_easy_setopt(curl, CURLOPT_SSL_VERIFYPEER, 1); |
||||
|
curl_easy_setopt(curl, CURLOPT_SSL_VERIFYHOST, 2); |
||||
|
curl_easy_setopt(curl, CURLOPT_USERAGENT, "curl/7.29.0"); |
||||
|
curl_easy_setopt(curl, CURLOPT_URL, "https://api.github.com/repositories"); |
||||
|
if (cert_file) |
||||
|
curl_easy_setopt(curl, CURLOPT_CAINFO, cert_file); |
||||
|
curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1); |
||||
|
curl_easy_setopt(curl, CURLOPT_ERRORBUFFER, error); |
||||
|
curl_easy_setopt(curl, CURLOPT_WRITEDATA, mf); |
||||
|
curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, memfwrite); |
||||
|
res = curl_easy_perform(curl); |
||||
|
curl_easy_cleanup(curl); |
||||
|
if (res != CURLE_OK) { |
||||
|
std::cerr << error << std::endl; |
||||
|
return SQLITE_FAIL; |
||||
|
} |
||||
|
|
||||
|
picojson::value* v = new picojson::value; |
||||
|
std::string err; |
||||
|
picojson::parse(*v, mf->data, mf->data + mf->size, &err); |
||||
|
memfclose(mf); |
||||
|
|
||||
|
if (!err.empty()) { |
||||
|
delete v; |
||||
|
std::cerr << err << std::endl; |
||||
|
return SQLITE_FAIL; |
||||
|
} |
||||
|
|
||||
|
cursor *c = (cursor *)sqlite3_malloc(sizeof(cursor)); |
||||
|
c->rows = v; |
||||
|
c->index = 0; |
||||
|
*ppCursor = &c->base; |
||||
|
return SQLITE_OK; |
||||
|
} |
||||
|
|
||||
|
static int |
||||
|
my_close(cursor *c) { |
||||
|
delete c->rows; |
||||
|
sqlite3_free(c); |
||||
|
return SQLITE_OK; |
||||
|
} |
||||
|
|
||||
|
static int |
||||
|
my_filter(cursor *c, int idxNum, const char *idxStr, int argc, sqlite3_value **argv) { |
||||
|
c->index = 0; |
||||
|
return SQLITE_OK; |
||||
|
} |
||||
|
|
||||
|
static int |
||||
|
my_next(cursor *c) { |
||||
|
c->index++; |
||||
|
return SQLITE_OK; |
||||
|
} |
||||
|
|
||||
|
static int |
||||
|
my_eof(cursor *c) { |
||||
|
return c->index >= c->rows->get<picojson::array>().size() ? 1 : 0; |
||||
|
} |
||||
|
|
||||
|
static int |
||||
|
my_column(cursor *c, sqlite3_context *ctxt, int i) { |
||||
|
picojson::value v = c->rows->get<picojson::array>()[c->index]; |
||||
|
picojson::object row = v.get<picojson::object>(); |
||||
|
const char* p = NULL; |
||||
|
switch (i) { |
||||
|
case 0: |
||||
|
p = row["id"].to_str().c_str(); |
||||
|
break; |
||||
|
case 1: |
||||
|
p = row["full_name"].to_str().c_str(); |
||||
|
break; |
||||
|
case 2: |
||||
|
p = row["description"].to_str().c_str(); |
||||
|
break; |
||||
|
case 3: |
||||
|
p = row["html_url"].to_str().c_str(); |
||||
|
break; |
||||
|
} |
||||
|
sqlite3_result_text(ctxt, strdup(p), strlen(p), free); |
||||
|
return SQLITE_OK; |
||||
|
} |
||||
|
|
||||
|
static int |
||||
|
my_rowid(cursor *c, sqlite3_int64 *pRowid) { |
||||
|
*pRowid = c->index; |
||||
|
return SQLITE_OK; |
||||
|
} |
||||
|
|
||||
|
static int |
||||
|
my_bestindex(sqlite3_vtab *tab, sqlite3_index_info *pIdxInfo) { |
||||
|
return SQLITE_OK; |
||||
|
} |
||||
|
|
||||
|
static const sqlite3_module module = { |
||||
|
0, |
||||
|
my_create, |
||||
|
my_connect, |
||||
|
my_bestindex, |
||||
|
my_disconnect, |
||||
|
my_destroy, |
||||
|
my_open, |
||||
|
(int (*)(sqlite3_vtab_cursor *)) my_close, |
||||
|
(int (*)(sqlite3_vtab_cursor *, int, char const *, int, sqlite3_value **)) my_filter, |
||||
|
(int (*)(sqlite3_vtab_cursor *)) my_next, |
||||
|
(int (*)(sqlite3_vtab_cursor *)) my_eof, |
||||
|
(int (*)(sqlite3_vtab_cursor *, sqlite3_context *, int)) my_column, |
||||
|
(int (*)(sqlite3_vtab_cursor *, sqlite3_int64 *)) my_rowid, |
||||
|
NULL, // my_update
|
||||
|
NULL, // my_begin
|
||||
|
NULL, // my_sync
|
||||
|
NULL, // my_commit
|
||||
|
NULL, // my_rollback
|
||||
|
NULL, // my_findfunction
|
||||
|
NULL, // my_rename
|
||||
|
}; |
||||
|
|
||||
|
static void |
||||
|
destructor(void *arg) { |
||||
|
return; |
||||
|
} |
||||
|
|
||||
|
|
||||
|
extern "C" { |
||||
|
|
||||
|
EXPORT int |
||||
|
sqlite3_extension_init(sqlite3 *db, char **errmsg, const sqlite3_api_routines *api) { |
||||
|
SQLITE_EXTENSION_INIT2(api); |
||||
|
sqlite3_create_module_v2(db, "github", &module, NULL, destructor); |
||||
|
return 0; |
||||
|
} |
||||
|
|
||||
|
} |
@ -0,0 +1,106 @@ |
|||||
|
package main |
||||
|
|
||||
|
import ( |
||||
|
"database/sql" |
||||
|
"fmt" |
||||
|
_ "github.com/mattn/go-sqlite3" |
||||
|
"log" |
||||
|
"os" |
||||
|
) |
||||
|
|
||||
|
func main() { |
||||
|
os.Remove("./foo.db") |
||||
|
|
||||
|
db, err := sql.Open("sqlite3", "./foo.db") |
||||
|
if err != nil { |
||||
|
log.Fatal(err) |
||||
|
} |
||||
|
defer db.Close() |
||||
|
|
||||
|
sqlStmt := ` |
||||
|
create table foo (id integer not null primary key, name text); |
||||
|
delete from foo; |
||||
|
` |
||||
|
_, err = db.Exec(sqlStmt) |
||||
|
if err != nil { |
||||
|
log.Printf("%q: %s\n", err, sqlStmt) |
||||
|
return |
||||
|
} |
||||
|
|
||||
|
tx, err := db.Begin() |
||||
|
if err != nil { |
||||
|
log.Fatal(err) |
||||
|
} |
||||
|
stmt, err := tx.Prepare("insert into foo(id, name) values(?, ?)") |
||||
|
if err != nil { |
||||
|
log.Fatal(err) |
||||
|
} |
||||
|
defer stmt.Close() |
||||
|
for i := 0; i < 100; i++ { |
||||
|
_, err = stmt.Exec(i, fmt.Sprintf("こんにちわ世界%03d", i)) |
||||
|
if err != nil { |
||||
|
log.Fatal(err) |
||||
|
} |
||||
|
} |
||||
|
tx.Commit() |
||||
|
|
||||
|
rows, err := db.Query("select id, name from foo") |
||||
|
if err != nil { |
||||
|
log.Fatal(err) |
||||
|
} |
||||
|
defer rows.Close() |
||||
|
for rows.Next() { |
||||
|
var id int |
||||
|
var name string |
||||
|
err = rows.Scan(&id, &name) |
||||
|
if err != nil { |
||||
|
log.Fatal(err) |
||||
|
} |
||||
|
fmt.Println(id, name) |
||||
|
} |
||||
|
err = rows.Err() |
||||
|
if err != nil { |
||||
|
log.Fatal(err) |
||||
|
} |
||||
|
|
||||
|
stmt, err = db.Prepare("select name from foo where id = ?") |
||||
|
if err != nil { |
||||
|
log.Fatal(err) |
||||
|
} |
||||
|
defer stmt.Close() |
||||
|
var name string |
||||
|
err = stmt.QueryRow("3").Scan(&name) |
||||
|
if err != nil { |
||||
|
log.Fatal(err) |
||||
|
} |
||||
|
fmt.Println(name) |
||||
|
|
||||
|
_, err = db.Exec("delete from foo") |
||||
|
if err != nil { |
||||
|
log.Fatal(err) |
||||
|
} |
||||
|
|
||||
|
_, err = db.Exec("insert into foo(id, name) values(1, 'foo'), (2, 'bar'), (3, 'baz')") |
||||
|
if err != nil { |
||||
|
log.Fatal(err) |
||||
|
} |
||||
|
|
||||
|
rows, err = db.Query("select id, name from foo") |
||||
|
if err != nil { |
||||
|
log.Fatal(err) |
||||
|
} |
||||
|
defer rows.Close() |
||||
|
for rows.Next() { |
||||
|
var id int |
||||
|
var name string |
||||
|
err = rows.Scan(&id, &name) |
||||
|
if err != nil { |
||||
|
log.Fatal(err) |
||||
|
} |
||||
|
fmt.Println(id, name) |
||||
|
} |
||||
|
err = rows.Err() |
||||
|
if err != nil { |
||||
|
log.Fatal(err) |
||||
|
} |
||||
|
} |
@ -0,0 +1,74 @@ |
|||||
|
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
|
||||
|
//
|
||||
|
// Use of this source code is governed by an MIT-style
|
||||
|
// license that can be found in the LICENSE file.
|
||||
|
|
||||
|
package sqlite3 |
||||
|
|
||||
|
/* |
||||
|
#ifndef USE_LIBSQLITE3 |
||||
|
#include <sqlite3-binding.h> |
||||
|
#else |
||||
|
#include <sqlite3.h> |
||||
|
#endif |
||||
|
#include <stdlib.h> |
||||
|
*/ |
||||
|
import "C" |
||||
|
import ( |
||||
|
"runtime" |
||||
|
"unsafe" |
||||
|
) |
||||
|
|
||||
|
type SQLiteBackup struct { |
||||
|
b *C.sqlite3_backup |
||||
|
} |
||||
|
|
||||
|
func (c *SQLiteConn) Backup(dest string, conn *SQLiteConn, src string) (*SQLiteBackup, error) { |
||||
|
destptr := C.CString(dest) |
||||
|
defer C.free(unsafe.Pointer(destptr)) |
||||
|
srcptr := C.CString(src) |
||||
|
defer C.free(unsafe.Pointer(srcptr)) |
||||
|
|
||||
|
if b := C.sqlite3_backup_init(c.db, destptr, conn.db, srcptr); b != nil { |
||||
|
bb := &SQLiteBackup{b: b} |
||||
|
runtime.SetFinalizer(bb, (*SQLiteBackup).Finish) |
||||
|
return bb, nil |
||||
|
} |
||||
|
return nil, c.lastError() |
||||
|
} |
||||
|
|
||||
|
// Step runs one step of the backup, calling the underlying
// `sqlite3_backup_step` function. It returns a boolean indicating whether
// the backup is complete (the underlying C function returned SQLITE_DONE,
// code 101) and an error for any failure other than SQLITE_LOCKED or
// SQLITE_BUSY.
|
||||
|
func (b *SQLiteBackup) Step(p int) (bool, error) { |
||||
|
ret := C.sqlite3_backup_step(b.b, C.int(p)) |
||||
|
if ret == C.SQLITE_DONE { |
||||
|
return true, nil |
||||
|
} else if ret != 0 && ret != C.SQLITE_LOCKED && ret != C.SQLITE_BUSY { |
||||
|
return false, Error{Code: ErrNo(ret)} |
||||
|
} |
||||
|
return false, nil |
||||
|
} |
||||
|
|
||||
|
func (b *SQLiteBackup) Remaining() int { |
||||
|
return int(C.sqlite3_backup_remaining(b.b)) |
||||
|
} |
||||
|
|
||||
|
func (b *SQLiteBackup) PageCount() int { |
||||
|
return int(C.sqlite3_backup_pagecount(b.b)) |
||||
|
} |
||||
|
|
||||
|
func (b *SQLiteBackup) Finish() error { |
||||
|
return b.Close() |
||||
|
} |
||||
|
|
||||
|
func (b *SQLiteBackup) Close() error { |
||||
|
ret := C.sqlite3_backup_finish(b.b) |
||||
|
if ret != 0 { |
||||
|
return Error{Code: ErrNo(ret)} |
||||
|
} |
||||
|
b.b = nil |
||||
|
runtime.SetFinalizer(b, nil) |
||||
|
return nil |
||||
|
} |
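// backupAll is an illustrative sketch added for this write-up (not part of
// the upstream file): it drives a complete online backup in fixed-size
// steps so the caller can observe progress between steps. dest is the
// connection written to and src is the connection read from, matching the
// receiver/argument order of Backup above.
func backupAll(dest, src *SQLiteConn, pagesPerStep int, progress func(remaining, total int)) error {
	bk, err := dest.Backup("main", src, "main")
	if err != nil {
		return err
	}
	defer bk.Finish()

	for {
		done, err := bk.Step(pagesPerStep)
		if err != nil {
			return err
		}
		if progress != nil {
			progress(bk.Remaining(), bk.PageCount())
		}
		if done {
			return nil
		}
	}
}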
@ -0,0 +1,336 @@ |
|||||
|
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
|
||||
|
//
|
||||
|
// Use of this source code is governed by an MIT-style
|
||||
|
// license that can be found in the LICENSE file.
|
||||
|
|
||||
|
package sqlite3 |
||||
|
|
||||
|
// You can't export a Go function to C and have definitions in the C
|
||||
|
// preamble in the same file, so we have to have callbackTrampoline in
|
||||
|
// its own file. Because we need a separate file anyway, the support
|
||||
|
// code for SQLite custom functions is in here.
|
||||
|
|
||||
|
/* |
||||
|
#include <sqlite3-binding.h> |
||||
|
#include <stdlib.h> |
||||
|
|
||||
|
void _sqlite3_result_text(sqlite3_context* ctx, const char* s); |
||||
|
void _sqlite3_result_blob(sqlite3_context* ctx, const void* b, int l); |
||||
|
*/ |
||||
|
import "C" |
||||
|
|
||||
|
import ( |
||||
|
"errors" |
||||
|
"fmt" |
||||
|
"math" |
||||
|
"reflect" |
||||
|
"sync" |
||||
|
"unsafe" |
||||
|
) |
||||
|
|
||||
|
//export callbackTrampoline
|
||||
|
func callbackTrampoline(ctx *C.sqlite3_context, argc int, argv **C.sqlite3_value) { |
||||
|
args := (*[(math.MaxInt32 - 1) / unsafe.Sizeof((*C.sqlite3_value)(nil))]*C.sqlite3_value)(unsafe.Pointer(argv))[:argc:argc] |
||||
|
fi := lookupHandle(uintptr(C.sqlite3_user_data(ctx))).(*functionInfo) |
||||
|
fi.Call(ctx, args) |
||||
|
} |
||||
|
|
||||
|
//export stepTrampoline
|
||||
|
func stepTrampoline(ctx *C.sqlite3_context, argc int, argv **C.sqlite3_value) { |
||||
|
args := (*[(math.MaxInt32 - 1) / unsafe.Sizeof((*C.sqlite3_value)(nil))]*C.sqlite3_value)(unsafe.Pointer(argv))[:argc:argc] |
||||
|
ai := lookupHandle(uintptr(C.sqlite3_user_data(ctx))).(*aggInfo) |
||||
|
ai.Step(ctx, args) |
||||
|
} |
||||
|
|
||||
|
//export doneTrampoline
|
||||
|
func doneTrampoline(ctx *C.sqlite3_context) { |
||||
|
handle := uintptr(C.sqlite3_user_data(ctx)) |
||||
|
ai := lookupHandle(handle).(*aggInfo) |
||||
|
ai.Done(ctx) |
||||
|
} |
||||
|
|
||||
|
// Use handles to avoid passing Go pointers to C.
|
||||
|
|
||||
|
type handleVal struct { |
||||
|
db *SQLiteConn |
||||
|
val interface{} |
||||
|
} |
||||
|
|
||||
|
var handleLock sync.Mutex |
||||
|
var handleVals = make(map[uintptr]handleVal) |
||||
|
var handleIndex uintptr = 100 |
||||
|
|
||||
|
func newHandle(db *SQLiteConn, v interface{}) uintptr { |
||||
|
handleLock.Lock() |
||||
|
defer handleLock.Unlock() |
||||
|
i := handleIndex |
||||
|
handleIndex++ |
||||
|
handleVals[i] = handleVal{db, v} |
||||
|
return i |
||||
|
} |
||||
|
|
||||
|
func lookupHandle(handle uintptr) interface{} { |
||||
|
handleLock.Lock() |
||||
|
defer handleLock.Unlock() |
||||
|
r, ok := handleVals[handle] |
||||
|
if !ok { |
||||
|
if handle >= 100 && handle < handleIndex { |
||||
|
panic("deleted handle") |
||||
|
} else { |
||||
|
panic("invalid handle") |
||||
|
} |
||||
|
} |
||||
|
return r.val |
||||
|
} |
||||
|
|
||||
|
func deleteHandles(db *SQLiteConn) { |
||||
|
handleLock.Lock() |
||||
|
defer handleLock.Unlock() |
||||
|
for handle, val := range handleVals { |
||||
|
if val.db == db { |
||||
|
delete(handleVals, handle) |
||||
|
} |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
// This is only here so that tests can refer to it.
|
||||
|
type callbackArgRaw C.sqlite3_value |
||||
|
|
||||
|
type callbackArgConverter func(*C.sqlite3_value) (reflect.Value, error) |
||||
|
|
||||
|
type callbackArgCast struct { |
||||
|
f callbackArgConverter |
||||
|
typ reflect.Type |
||||
|
} |
||||
|
|
||||
|
func (c callbackArgCast) Run(v *C.sqlite3_value) (reflect.Value, error) { |
||||
|
val, err := c.f(v) |
||||
|
if err != nil { |
||||
|
return reflect.Value{}, err |
||||
|
} |
||||
|
if !val.Type().ConvertibleTo(c.typ) { |
||||
|
return reflect.Value{}, fmt.Errorf("cannot convert %s to %s", val.Type(), c.typ) |
||||
|
} |
||||
|
return val.Convert(c.typ), nil |
||||
|
} |
||||
|
|
||||
|
func callbackArgInt64(v *C.sqlite3_value) (reflect.Value, error) { |
||||
|
if C.sqlite3_value_type(v) != C.SQLITE_INTEGER { |
||||
|
return reflect.Value{}, fmt.Errorf("argument must be an INTEGER") |
||||
|
} |
||||
|
return reflect.ValueOf(int64(C.sqlite3_value_int64(v))), nil |
||||
|
} |
||||
|
|
||||
|
func callbackArgBool(v *C.sqlite3_value) (reflect.Value, error) { |
||||
|
if C.sqlite3_value_type(v) != C.SQLITE_INTEGER { |
||||
|
return reflect.Value{}, fmt.Errorf("argument must be an INTEGER") |
||||
|
} |
||||
|
i := int64(C.sqlite3_value_int64(v)) |
||||
|
val := false |
||||
|
if i != 0 { |
||||
|
val = true |
||||
|
} |
||||
|
return reflect.ValueOf(val), nil |
||||
|
} |
||||
|
|
||||
|
func callbackArgFloat64(v *C.sqlite3_value) (reflect.Value, error) { |
||||
|
if C.sqlite3_value_type(v) != C.SQLITE_FLOAT { |
||||
|
return reflect.Value{}, fmt.Errorf("argument must be a FLOAT") |
||||
|
} |
||||
|
return reflect.ValueOf(float64(C.sqlite3_value_double(v))), nil |
||||
|
} |
||||
|
|
||||
|
func callbackArgBytes(v *C.sqlite3_value) (reflect.Value, error) { |
||||
|
switch C.sqlite3_value_type(v) { |
||||
|
case C.SQLITE_BLOB: |
||||
|
l := C.sqlite3_value_bytes(v) |
||||
|
p := C.sqlite3_value_blob(v) |
||||
|
return reflect.ValueOf(C.GoBytes(p, l)), nil |
||||
|
case C.SQLITE_TEXT: |
||||
|
l := C.sqlite3_value_bytes(v) |
||||
|
c := unsafe.Pointer(C.sqlite3_value_text(v)) |
||||
|
return reflect.ValueOf(C.GoBytes(c, l)), nil |
||||
|
default: |
||||
|
return reflect.Value{}, fmt.Errorf("argument must be BLOB or TEXT") |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func callbackArgString(v *C.sqlite3_value) (reflect.Value, error) { |
||||
|
switch C.sqlite3_value_type(v) { |
||||
|
case C.SQLITE_BLOB: |
||||
|
l := C.sqlite3_value_bytes(v) |
||||
|
p := (*C.char)(C.sqlite3_value_blob(v)) |
||||
|
return reflect.ValueOf(C.GoStringN(p, l)), nil |
||||
|
case C.SQLITE_TEXT: |
||||
|
c := (*C.char)(unsafe.Pointer(C.sqlite3_value_text(v))) |
||||
|
return reflect.ValueOf(C.GoString(c)), nil |
||||
|
default: |
||||
|
return reflect.Value{}, fmt.Errorf("argument must be BLOB or TEXT") |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func callbackArgGeneric(v *C.sqlite3_value) (reflect.Value, error) { |
||||
|
switch C.sqlite3_value_type(v) { |
||||
|
case C.SQLITE_INTEGER: |
||||
|
return callbackArgInt64(v) |
||||
|
case C.SQLITE_FLOAT: |
||||
|
return callbackArgFloat64(v) |
||||
|
case C.SQLITE_TEXT: |
||||
|
return callbackArgString(v) |
||||
|
case C.SQLITE_BLOB: |
||||
|
return callbackArgBytes(v) |
||||
|
case C.SQLITE_NULL: |
||||
|
// Interpret NULL as a nil byte slice.
|
||||
|
var ret []byte |
||||
|
return reflect.ValueOf(ret), nil |
||||
|
default: |
||||
|
panic("unreachable") |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func callbackArg(typ reflect.Type) (callbackArgConverter, error) { |
||||
|
switch typ.Kind() { |
||||
|
case reflect.Interface: |
||||
|
if typ.NumMethod() != 0 { |
||||
|
return nil, errors.New("the only supported interface type is interface{}") |
||||
|
} |
||||
|
return callbackArgGeneric, nil |
||||
|
case reflect.Slice: |
||||
|
if typ.Elem().Kind() != reflect.Uint8 { |
||||
|
return nil, errors.New("the only supported slice type is []byte") |
||||
|
} |
||||
|
return callbackArgBytes, nil |
||||
|
case reflect.String: |
||||
|
return callbackArgString, nil |
||||
|
case reflect.Bool: |
||||
|
return callbackArgBool, nil |
||||
|
case reflect.Int64: |
||||
|
return callbackArgInt64, nil |
||||
|
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Int, reflect.Uint: |
||||
|
c := callbackArgCast{callbackArgInt64, typ} |
||||
|
return c.Run, nil |
||||
|
case reflect.Float64: |
||||
|
return callbackArgFloat64, nil |
||||
|
case reflect.Float32: |
||||
|
c := callbackArgCast{callbackArgFloat64, typ} |
||||
|
return c.Run, nil |
||||
|
default: |
||||
|
return nil, fmt.Errorf("don't know how to convert to %s", typ) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func callbackConvertArgs(argv []*C.sqlite3_value, converters []callbackArgConverter, variadic callbackArgConverter) ([]reflect.Value, error) { |
||||
|
var args []reflect.Value |
||||
|
|
||||
|
if len(argv) < len(converters) { |
||||
|
return nil, fmt.Errorf("function requires at least %d arguments", len(converters)) |
||||
|
} |
||||
|
|
||||
|
for i, arg := range argv[:len(converters)] { |
||||
|
v, err := converters[i](arg) |
||||
|
if err != nil { |
||||
|
return nil, err |
||||
|
} |
||||
|
args = append(args, v) |
||||
|
} |
||||
|
|
||||
|
if variadic != nil { |
||||
|
for _, arg := range argv[len(converters):] { |
||||
|
v, err := variadic(arg) |
||||
|
if err != nil { |
||||
|
return nil, err |
||||
|
} |
||||
|
args = append(args, v) |
||||
|
} |
||||
|
} |
||||
|
return args, nil |
||||
|
} |
||||
|
|
||||
|
type callbackRetConverter func(*C.sqlite3_context, reflect.Value) error |
||||
|
|
||||
|
func callbackRetInteger(ctx *C.sqlite3_context, v reflect.Value) error { |
||||
|
switch v.Type().Kind() { |
||||
|
case reflect.Int64: |
||||
|
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Int, reflect.Uint: |
||||
|
v = v.Convert(reflect.TypeOf(int64(0))) |
||||
|
case reflect.Bool: |
||||
|
b := v.Interface().(bool) |
||||
|
if b { |
||||
|
v = reflect.ValueOf(int64(1)) |
||||
|
} else { |
||||
|
v = reflect.ValueOf(int64(0)) |
||||
|
} |
||||
|
default: |
||||
|
return fmt.Errorf("cannot convert %s to INTEGER", v.Type()) |
||||
|
} |
||||
|
|
||||
|
C.sqlite3_result_int64(ctx, C.sqlite3_int64(v.Interface().(int64))) |
||||
|
return nil |
||||
|
} |
||||
|
|
||||
|
func callbackRetFloat(ctx *C.sqlite3_context, v reflect.Value) error { |
||||
|
switch v.Type().Kind() { |
||||
|
case reflect.Float64: |
||||
|
case reflect.Float32: |
||||
|
v = v.Convert(reflect.TypeOf(float64(0))) |
||||
|
default: |
||||
|
return fmt.Errorf("cannot convert %s to FLOAT", v.Type()) |
||||
|
} |
||||
|
|
||||
|
C.sqlite3_result_double(ctx, C.double(v.Interface().(float64))) |
||||
|
return nil |
||||
|
} |
||||
|
|
||||
|
func callbackRetBlob(ctx *C.sqlite3_context, v reflect.Value) error { |
||||
|
if v.Type().Kind() != reflect.Slice || v.Type().Elem().Kind() != reflect.Uint8 { |
||||
|
return fmt.Errorf("cannot convert %s to BLOB", v.Type()) |
||||
|
} |
||||
|
i := v.Interface() |
||||
|
if i == nil || len(i.([]byte)) == 0 { |
||||
|
C.sqlite3_result_null(ctx) |
||||
|
} else { |
||||
|
bs := i.([]byte) |
||||
|
C._sqlite3_result_blob(ctx, unsafe.Pointer(&bs[0]), C.int(len(bs))) |
||||
|
} |
||||
|
return nil |
||||
|
} |
||||
|
|
||||
|
func callbackRetText(ctx *C.sqlite3_context, v reflect.Value) error { |
||||
|
if v.Type().Kind() != reflect.String { |
||||
|
return fmt.Errorf("cannot convert %s to TEXT", v.Type()) |
||||
|
} |
||||
|
C._sqlite3_result_text(ctx, C.CString(v.Interface().(string))) |
||||
|
return nil |
||||
|
} |
||||
|
|
||||
|
func callbackRet(typ reflect.Type) (callbackRetConverter, error) { |
||||
|
switch typ.Kind() { |
||||
|
case reflect.Slice: |
||||
|
if typ.Elem().Kind() != reflect.Uint8 { |
||||
|
return nil, errors.New("the only supported slice type is []byte") |
||||
|
} |
||||
|
return callbackRetBlob, nil |
||||
|
case reflect.String: |
||||
|
return callbackRetText, nil |
||||
|
case reflect.Bool, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Int, reflect.Uint: |
||||
|
return callbackRetInteger, nil |
||||
|
case reflect.Float32, reflect.Float64: |
||||
|
return callbackRetFloat, nil |
||||
|
default: |
||||
|
return nil, fmt.Errorf("don't know how to convert to %s", typ) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func callbackError(ctx *C.sqlite3_context, err error) { |
||||
|
cstr := C.CString(err.Error()) |
||||
|
defer C.free(unsafe.Pointer(cstr)) |
||||
|
C.sqlite3_result_error(ctx, cstr, -1) |
||||
|
} |
||||
|
|
||||
|
// Test support code. Tests are not allowed to import "C", so we can't
|
||||
|
// declare any functions that use C.sqlite3_value.
|
||||
|
func callbackSyntheticForTests(v reflect.Value, err error) callbackArgConverter { |
||||
|
return func(*C.sqlite3_value) (reflect.Value, error) { |
||||
|
return v, err |
||||
|
} |
||||
|
} |
@@ -0,0 +1,97 @@
package sqlite3

import (
	"errors"
	"math"
	"reflect"
	"testing"
)

func TestCallbackArgCast(t *testing.T) {
	intConv := callbackSyntheticForTests(reflect.ValueOf(int64(math.MaxInt64)), nil)
	floatConv := callbackSyntheticForTests(reflect.ValueOf(float64(math.MaxFloat64)), nil)
	errConv := callbackSyntheticForTests(reflect.Value{}, errors.New("test"))

	tests := []struct {
		f callbackArgConverter
		o reflect.Value
	}{
		{intConv, reflect.ValueOf(int8(-1))},
		{intConv, reflect.ValueOf(int16(-1))},
		{intConv, reflect.ValueOf(int32(-1))},
		{intConv, reflect.ValueOf(uint8(math.MaxUint8))},
		{intConv, reflect.ValueOf(uint16(math.MaxUint16))},
		{intConv, reflect.ValueOf(uint32(math.MaxUint32))},
		// Special case, int64->uint64 is only 1<<63 - 1, not 1<<64 - 1
		{intConv, reflect.ValueOf(uint64(math.MaxInt64))},
		{floatConv, reflect.ValueOf(float32(math.Inf(1)))},
	}

	for _, test := range tests {
		conv := callbackArgCast{test.f, test.o.Type()}
		val, err := conv.Run(nil)
		if err != nil {
			t.Errorf("Couldn't convert to %s: %s", test.o.Type(), err)
		} else if !reflect.DeepEqual(val.Interface(), test.o.Interface()) {
			t.Errorf("Unexpected result from converting to %s: got %v, want %v", test.o.Type(), val.Interface(), test.o.Interface())
		}
	}

	conv := callbackArgCast{errConv, reflect.TypeOf(int8(0))}
	_, err := conv.Run(nil)
	if err == nil {
		t.Errorf("Expected error during callbackArgCast, but got none")
	}
}

func TestCallbackConverters(t *testing.T) {
	tests := []struct {
		v   interface{}
		err bool
	}{
		// Unfortunately, we can't tell which converter was returned,
		// but we can at least check which types can be converted.
		{[]byte{0}, false},
		{"text", false},
		{true, false},
		{int8(0), false},
		{int16(0), false},
		{int32(0), false},
		{int64(0), false},
		{uint8(0), false},
		{uint16(0), false},
		{uint32(0), false},
		{uint64(0), false},
		{int(0), false},
		{uint(0), false},
		{float64(0), false},
		{float32(0), false},

		{func() {}, true},
		{complex64(complex(0, 0)), true},
		{complex128(complex(0, 0)), true},
		{struct{}{}, true},
		{map[string]string{}, true},
		{[]string{}, true},
		{(*int8)(nil), true},
		{make(chan int), true},
	}

	for _, test := range tests {
		_, err := callbackArg(reflect.TypeOf(test.v))
		if test.err && err == nil {
			t.Errorf("Expected an error when converting %s, got no error", reflect.TypeOf(test.v))
		} else if !test.err && err != nil {
			t.Errorf("Expected converter when converting %s, got error: %s", reflect.TypeOf(test.v), err)
		}
	}

	for _, test := range tests {
		_, err := callbackRet(reflect.TypeOf(test.v))
		if test.err && err == nil {
			t.Errorf("Expected an error when converting %s, got no error", reflect.TypeOf(test.v))
		} else if !test.err && err != nil {
			t.Errorf("Expected converter when converting %s, got error: %s", reflect.TypeOf(test.v), err)
		}
	}
}
@@ -0,0 +1,112 @@
/*
Package sqlite3 provides an interface to SQLite3 databases.

This works as a driver for database/sql.

Installation

    go get github.com/mattn/go-sqlite3

Supported Types

Currently, go-sqlite3 supports the following data types.

    +------------------------------+
    |go        | sqlite3           |
    |----------|-------------------|
    |nil       | null              |
    |int       | integer           |
    |int64     | integer           |
    |float64   | float             |
    |bool      | integer           |
    |[]byte    | blob              |
    |string    | text              |
    |time.Time | timestamp/datetime|
    +------------------------------+

SQLite3 Extension

You can write your own extension module for sqlite3. For example, below is an
extension for a Regexp matcher operation.

    #include <pcre.h>
    #include <string.h>
    #include <stdio.h>
    #include <sqlite3ext.h>

    SQLITE_EXTENSION_INIT1
    static void regexp_func(sqlite3_context *context, int argc, sqlite3_value **argv) {
      if (argc >= 2) {
        const char *target  = (const char *)sqlite3_value_text(argv[1]);
        const char *pattern = (const char *)sqlite3_value_text(argv[0]);
        const char* errstr = NULL;
        int erroff = 0;
        int vec[500];
        int n, rc;
        pcre* re = pcre_compile(pattern, 0, &errstr, &erroff, NULL);
        rc = pcre_exec(re, NULL, target, strlen(target), 0, 0, vec, 500);
        if (rc <= 0) {
          sqlite3_result_error(context, errstr, 0);
          return;
        }
        sqlite3_result_int(context, 1);
      }
    }

    #ifdef _WIN32
    __declspec(dllexport)
    #endif
    int sqlite3_extension_init(sqlite3 *db, char **errmsg,
          const sqlite3_api_routines *api) {
      SQLITE_EXTENSION_INIT2(api);
      return sqlite3_create_function(db, "regexp", 2, SQLITE_UTF8,
        (void*)db, regexp_func, NULL, NULL);
    }

It needs to be built as a shared library (.so/.dll), and the extension module
then needs to be registered like below.

    sql.Register("sqlite3_with_extensions",
        &sqlite3.SQLiteDriver{
            Extensions: []string{
                "sqlite3_mod_regexp",
            },
        })

Then, you can use this extension.

    rows, err := db.Query("select text from mytable where name regexp '^golang'")

Connection Hook

You can hook in and run your own code when a connection is established.
database/sql doesn't provide a way to get at the native go-sqlite3 interfaces,
so if you need them, set ConnectHook and capture the SQLiteConn it receives.

    sql.Register("sqlite3_with_hook_example",
        &sqlite3.SQLiteDriver{
            ConnectHook: func(conn *sqlite3.SQLiteConn) error {
                sqlite3conn = append(sqlite3conn, conn)
                return nil
            },
        })

Go SQLite3 Extensions

If you want to register Go functions as SQLite extension functions,
call RegisterFunc from ConnectHook.

    regex = func(re, s string) (bool, error) {
        return regexp.MatchString(re, s)
    }
    sql.Register("sqlite3_with_go_func",
        &sqlite3.SQLiteDriver{
            ConnectHook: func(conn *sqlite3.SQLiteConn) error {
                return conn.RegisterFunc("regexp", regex, true)
            },
        })

See the documentation of RegisterFunc for more details.

*/
package sqlite3
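The godoc above shows how a Go function is registered through ConnectHook and RegisterFunc, but stops at driver registration. The following is a rough end-to-end sketch of querying with such a function; the driver name, literal query, and use of the REGEXP operator are illustrative assumptions added by the editor, not part of the vendored file.

package main

import (
	"database/sql"
	"fmt"
	"log"
	"regexp"

	sqlite3 "github.com/mattn/go-sqlite3"
)

func main() {
	// Register a driver whose connections expose regexp() as a SQL function,
	// mirroring the ConnectHook/RegisterFunc pattern from doc.go above.
	sql.Register("sqlite3_with_go_func_example", &sqlite3.SQLiteDriver{
		ConnectHook: func(conn *sqlite3.SQLiteConn) error {
			return conn.RegisterFunc("regexp", func(re, s string) (bool, error) {
				return regexp.MatchString(re, s)
			}, true)
		},
	})

	db, err := sql.Open("sqlite3_with_go_func_example", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// SQLite rewrites `value REGEXP pattern` into regexp(pattern, value),
	// so the Go function registered above backs the REGEXP operator.
	var matched int
	if err := db.QueryRow(`SELECT 'go-sqlite3' REGEXP '^go'`).Scan(&matched); err != nil {
		log.Fatal(err)
	}
	fmt.Println("matched:", matched)
}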
@@ -0,0 +1,128 @@
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.

package sqlite3

import "C"

type ErrNo int

const ErrNoMask C.int = 0xff

type ErrNoExtended int

type Error struct {
	Code         ErrNo         /* The error code returned by SQLite */
	ExtendedCode ErrNoExtended /* The extended error code returned by SQLite */
	err          string        /* The error string returned by sqlite3_errmsg(),
	this usually contains more specific details. */
}

// result codes from http://www.sqlite.org/c3ref/c_abort.html
var (
	ErrError      = ErrNo(1)  /* SQL error or missing database */
	ErrInternal   = ErrNo(2)  /* Internal logic error in SQLite */
	ErrPerm       = ErrNo(3)  /* Access permission denied */
	ErrAbort      = ErrNo(4)  /* Callback routine requested an abort */
	ErrBusy       = ErrNo(5)  /* The database file is locked */
	ErrLocked     = ErrNo(6)  /* A table in the database is locked */
	ErrNomem      = ErrNo(7)  /* A malloc() failed */
	ErrReadonly   = ErrNo(8)  /* Attempt to write a readonly database */
	ErrInterrupt  = ErrNo(9)  /* Operation terminated by sqlite3_interrupt() */
	ErrIoErr      = ErrNo(10) /* Some kind of disk I/O error occurred */
	ErrCorrupt    = ErrNo(11) /* The database disk image is malformed */
	ErrNotFound   = ErrNo(12) /* Unknown opcode in sqlite3_file_control() */
	ErrFull       = ErrNo(13) /* Insertion failed because database is full */
	ErrCantOpen   = ErrNo(14) /* Unable to open the database file */
	ErrProtocol   = ErrNo(15) /* Database lock protocol error */
	ErrEmpty      = ErrNo(16) /* Database is empty */
	ErrSchema     = ErrNo(17) /* The database schema changed */
	ErrTooBig     = ErrNo(18) /* String or BLOB exceeds size limit */
	ErrConstraint = ErrNo(19) /* Abort due to constraint violation */
	ErrMismatch   = ErrNo(20) /* Data type mismatch */
	ErrMisuse     = ErrNo(21) /* Library used incorrectly */
	ErrNoLFS      = ErrNo(22) /* Uses OS features not supported on host */
	ErrAuth       = ErrNo(23) /* Authorization denied */
	ErrFormat     = ErrNo(24) /* Auxiliary database format error */
	ErrRange      = ErrNo(25) /* 2nd parameter to sqlite3_bind out of range */
	ErrNotADB     = ErrNo(26) /* File opened that is not a database file */
	ErrNotice     = ErrNo(27) /* Notifications from sqlite3_log() */
	ErrWarning    = ErrNo(28) /* Warnings from sqlite3_log() */
)

func (err ErrNo) Error() string {
	return Error{Code: err}.Error()
}

func (err ErrNo) Extend(by int) ErrNoExtended {
	return ErrNoExtended(int(err) | (by << 8))
}

func (err ErrNoExtended) Error() string {
	return Error{Code: ErrNo(C.int(err) & ErrNoMask), ExtendedCode: err}.Error()
}

func (err Error) Error() string {
	if err.err != "" {
		return err.err
	}
	return errorString(err)
}

// result codes from http://www.sqlite.org/c3ref/c_abort_rollback.html
var (
	ErrIoErrRead              = ErrIoErr.Extend(1)
	ErrIoErrShortRead         = ErrIoErr.Extend(2)
	ErrIoErrWrite             = ErrIoErr.Extend(3)
	ErrIoErrFsync             = ErrIoErr.Extend(4)
	ErrIoErrDirFsync          = ErrIoErr.Extend(5)
	ErrIoErrTruncate          = ErrIoErr.Extend(6)
	ErrIoErrFstat             = ErrIoErr.Extend(7)
	ErrIoErrUnlock            = ErrIoErr.Extend(8)
	ErrIoErrRDlock            = ErrIoErr.Extend(9)
	ErrIoErrDelete            = ErrIoErr.Extend(10)
	ErrIoErrBlocked           = ErrIoErr.Extend(11)
	ErrIoErrNoMem             = ErrIoErr.Extend(12)
	ErrIoErrAccess            = ErrIoErr.Extend(13)
	ErrIoErrCheckReservedLock = ErrIoErr.Extend(14)
	ErrIoErrLock              = ErrIoErr.Extend(15)
	ErrIoErrClose             = ErrIoErr.Extend(16)
	ErrIoErrDirClose          = ErrIoErr.Extend(17)
	ErrIoErrSHMOpen           = ErrIoErr.Extend(18)
	ErrIoErrSHMSize           = ErrIoErr.Extend(19)
	ErrIoErrSHMLock           = ErrIoErr.Extend(20)
	ErrIoErrSHMMap            = ErrIoErr.Extend(21)
	ErrIoErrSeek              = ErrIoErr.Extend(22)
	ErrIoErrDeleteNoent       = ErrIoErr.Extend(23)
	ErrIoErrMMap              = ErrIoErr.Extend(24)
	ErrIoErrGetTempPath       = ErrIoErr.Extend(25)
	ErrIoErrConvPath          = ErrIoErr.Extend(26)
	ErrLockedSharedCache      = ErrLocked.Extend(1)
	ErrBusyRecovery           = ErrBusy.Extend(1)
	ErrBusySnapshot           = ErrBusy.Extend(2)
	ErrCantOpenNoTempDir      = ErrCantOpen.Extend(1)
	ErrCantOpenIsDir          = ErrCantOpen.Extend(2)
	ErrCantOpenFullPath       = ErrCantOpen.Extend(3)
	ErrCantOpenConvPath       = ErrCantOpen.Extend(4)
	ErrCorruptVTab            = ErrCorrupt.Extend(1)
	ErrReadonlyRecovery       = ErrReadonly.Extend(1)
	ErrReadonlyCantLock       = ErrReadonly.Extend(2)
	ErrReadonlyRollback       = ErrReadonly.Extend(3)
	ErrReadonlyDbMoved        = ErrReadonly.Extend(4)
	ErrAbortRollback          = ErrAbort.Extend(2)
	ErrConstraintCheck        = ErrConstraint.Extend(1)
	ErrConstraintCommitHook   = ErrConstraint.Extend(2)
	ErrConstraintForeignKey   = ErrConstraint.Extend(3)
	ErrConstraintFunction     = ErrConstraint.Extend(4)
	ErrConstraintNotNull      = ErrConstraint.Extend(5)
	ErrConstraintPrimaryKey   = ErrConstraint.Extend(6)
	ErrConstraintTrigger      = ErrConstraint.Extend(7)
	ErrConstraintUnique       = ErrConstraint.Extend(8)
	ErrConstraintVTab         = ErrConstraint.Extend(9)
	ErrConstraintRowId        = ErrConstraint.Extend(10)
	ErrNoticeRecoverWAL       = ErrNotice.Extend(1)
	ErrNoticeRecoverRollback  = ErrNotice.Extend(2)
	ErrWarningAutoIndex       = ErrWarning.Extend(1)
)
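The Error, ErrNo, and ErrNoExtended types above are what callers get back from failed statements. Below is a hedged sketch of how application code might check the basic and extended codes after a UNIQUE violation; the table name and values are made up for illustration and are not taken from the repository.

package main

import (
	"database/sql"
	"fmt"
	"log"

	sqlite3 "github.com/mattn/go-sqlite3"
)

func main() {
	db, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if _, err := db.Exec("CREATE TABLE t (v INTEGER NOT NULL UNIQUE)"); err != nil {
		log.Fatal(err)
	}
	if _, err := db.Exec("INSERT INTO t (v) VALUES (1)"); err != nil {
		log.Fatal(err)
	}

	// A duplicate insert violates the UNIQUE constraint; the driver returns
	// a sqlite3.Error carrying both the basic and the extended result code.
	_, err = db.Exec("INSERT INTO t (v) VALUES (1)")
	if sqliteErr, ok := err.(sqlite3.Error); ok {
		fmt.Println(sqliteErr.Code == sqlite3.ErrConstraint)               // expected: true
		fmt.Println(sqliteErr.ExtendedCode == sqlite3.ErrConstraintUnique) // expected: true
	}
}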
@ -0,0 +1,242 @@ |
|||||
|
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
|
||||
|
//
|
||||
|
// Use of this source code is governed by an MIT-style
|
||||
|
// license that can be found in the LICENSE file.
|
||||
|
|
||||
|
package sqlite3 |
||||
|
|
||||
|
import ( |
||||
|
"database/sql" |
||||
|
"io/ioutil" |
||||
|
"os" |
||||
|
"path" |
||||
|
"testing" |
||||
|
) |
||||
|
|
||||
|
func TestSimpleError(t *testing.T) { |
||||
|
e := ErrError.Error() |
||||
|
if e != "SQL logic error or missing database" { |
||||
|
t.Error("wrong error code:" + e) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func TestCorruptDbErrors(t *testing.T) { |
||||
|
dirName, err := ioutil.TempDir("", "sqlite3") |
||||
|
if err != nil { |
||||
|
t.Fatal(err) |
||||
|
} |
||||
|
defer os.RemoveAll(dirName) |
||||
|
|
||||
|
dbFileName := path.Join(dirName, "test.db") |
||||
|
f, err := os.Create(dbFileName) |
||||
|
if err != nil { |
||||
|
t.Error(err) |
||||
|
} |
||||
|
f.Write([]byte{1, 2, 3, 4, 5}) |
||||
|
f.Close() |
||||
|
|
||||
|
db, err := sql.Open("sqlite3", dbFileName) |
||||
|
if err == nil { |
||||
|
_, err = db.Exec("drop table foo") |
||||
|
} |
||||
|
|
||||
|
sqliteErr := err.(Error) |
||||
|
if sqliteErr.Code != ErrNotADB { |
||||
|
t.Error("wrong error code for corrupted DB") |
||||
|
} |
||||
|
if err.Error() == "" { |
||||
|
t.Error("wrong error string for corrupted DB") |
||||
|
} |
||||
|
db.Close() |
||||
|
} |
||||
|
|
||||
|
func TestSqlLogicErrors(t *testing.T) { |
||||
|
dirName, err := ioutil.TempDir("", "sqlite3") |
||||
|
if err != nil { |
||||
|
t.Fatal(err) |
||||
|
} |
||||
|
defer os.RemoveAll(dirName) |
||||
|
|
||||
|
dbFileName := path.Join(dirName, "test.db") |
||||
|
db, err := sql.Open("sqlite3", dbFileName) |
||||
|
if err != nil { |
||||
|
t.Error(err) |
||||
|
} |
||||
|
defer db.Close() |
||||
|
|
||||
|
_, err = db.Exec("CREATE TABLE Foo (id INTEGER PRIMARY KEY)") |
||||
|
if err != nil { |
||||
|
t.Error(err) |
||||
|
} |
||||
|
|
||||
|
const expectedErr = "table Foo already exists" |
||||
|
_, err = db.Exec("CREATE TABLE Foo (id INTEGER PRIMARY KEY)") |
||||
|
if err.Error() != expectedErr { |
||||
|
t.Errorf("Unexpected error: %s, expected %s", err.Error(), expectedErr) |
||||
|
} |
||||
|
|
||||
|
} |
||||
|
|
||||
|
func TestExtendedErrorCodes_ForeignKey(t *testing.T) { |
||||
|
dirName, err := ioutil.TempDir("", "sqlite3-err") |
||||
|
if err != nil { |
||||
|
t.Fatal(err) |
||||
|
} |
||||
|
defer os.RemoveAll(dirName) |
||||
|
|
||||
|
dbFileName := path.Join(dirName, "test.db") |
||||
|
db, err := sql.Open("sqlite3", dbFileName) |
||||
|
if err != nil { |
||||
|
t.Error(err) |
||||
|
} |
||||
|
defer db.Close() |
||||
|
|
||||
|
_, err = db.Exec("PRAGMA foreign_keys=ON;") |
||||
|
if err != nil { |
||||
|
t.Errorf("PRAGMA foreign_keys=ON: %v", err) |
||||
|
} |
||||
|
|
||||
|
_, err = db.Exec(`CREATE TABLE Foo ( |
||||
|
id INTEGER PRIMARY KEY AUTOINCREMENT, |
||||
|
value INTEGER NOT NULL, |
||||
|
ref INTEGER NULL REFERENCES Foo (id), |
||||
|
UNIQUE(value) |
||||
|
);`) |
||||
|
if err != nil { |
||||
|
t.Error(err) |
||||
|
} |
||||
|
|
||||
|
_, err = db.Exec("INSERT INTO Foo (ref, value) VALUES (100, 100);") |
||||
|
if err == nil { |
||||
|
t.Error("No error!") |
||||
|
} else { |
||||
|
sqliteErr := err.(Error) |
||||
|
if sqliteErr.Code != ErrConstraint { |
||||
|
t.Errorf("Wrong basic error code: %d != %d", |
||||
|
sqliteErr.Code, ErrConstraint) |
||||
|
} |
||||
|
if sqliteErr.ExtendedCode != ErrConstraintForeignKey { |
||||
|
t.Errorf("Wrong extended error code: %d != %d", |
||||
|
sqliteErr.ExtendedCode, ErrConstraintForeignKey) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
} |
||||
|
|
||||
|
func TestExtendedErrorCodes_NotNull(t *testing.T) { |
||||
|
dirName, err := ioutil.TempDir("", "sqlite3-err") |
||||
|
if err != nil { |
||||
|
t.Fatal(err) |
||||
|
} |
||||
|
defer os.RemoveAll(dirName) |
||||
|
|
||||
|
dbFileName := path.Join(dirName, "test.db") |
||||
|
db, err := sql.Open("sqlite3", dbFileName) |
||||
|
if err != nil { |
||||
|
t.Error(err) |
||||
|
} |
||||
|
defer db.Close() |
||||
|
|
||||
|
_, err = db.Exec("PRAGMA foreign_keys=ON;") |
||||
|
if err != nil { |
||||
|
t.Errorf("PRAGMA foreign_keys=ON: %v", err) |
||||
|
} |
||||
|
|
||||
|
_, err = db.Exec(`CREATE TABLE Foo ( |
||||
|
id INTEGER PRIMARY KEY AUTOINCREMENT, |
||||
|
value INTEGER NOT NULL, |
||||
|
ref INTEGER NULL REFERENCES Foo (id), |
||||
|
UNIQUE(value) |
||||
|
);`) |
||||
|
if err != nil { |
||||
|
t.Error(err) |
||||
|
} |
||||
|
|
||||
|
res, err := db.Exec("INSERT INTO Foo (value) VALUES (100);") |
||||
|
if err != nil { |
||||
|
t.Fatalf("Creating first row: %v", err) |
||||
|
} |
||||
|
|
||||
|
id, err := res.LastInsertId() |
||||
|
if err != nil { |
||||
|
t.Fatalf("Retrieving last insert id: %v", err) |
||||
|
} |
||||
|
|
||||
|
_, err = db.Exec("INSERT INTO Foo (ref) VALUES (?);", id) |
||||
|
if err == nil { |
||||
|
t.Error("No error!") |
||||
|
} else { |
||||
|
sqliteErr := err.(Error) |
||||
|
if sqliteErr.Code != ErrConstraint { |
||||
|
t.Errorf("Wrong basic error code: %d != %d", |
||||
|
sqliteErr.Code, ErrConstraint) |
||||
|
} |
||||
|
if sqliteErr.ExtendedCode != ErrConstraintNotNull { |
||||
|
t.Errorf("Wrong extended error code: %d != %d", |
||||
|
sqliteErr.ExtendedCode, ErrConstraintNotNull) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
} |
||||
|
|
||||
|
func TestExtendedErrorCodes_Unique(t *testing.T) { |
||||
|
dirName, err := ioutil.TempDir("", "sqlite3-err") |
||||
|
if err != nil { |
||||
|
t.Fatal(err) |
||||
|
} |
||||
|
defer os.RemoveAll(dirName) |
||||
|
|
||||
|
dbFileName := path.Join(dirName, "test.db") |
||||
|
db, err := sql.Open("sqlite3", dbFileName) |
||||
|
if err != nil { |
||||
|
t.Error(err) |
||||
|
} |
||||
|
defer db.Close() |
||||
|
|
||||
|
_, err = db.Exec("PRAGMA foreign_keys=ON;") |
||||
|
if err != nil { |
||||
|
t.Errorf("PRAGMA foreign_keys=ON: %v", err) |
||||
|
} |
||||
|
|
||||
|
_, err = db.Exec(`CREATE TABLE Foo ( |
||||
|
id INTEGER PRIMARY KEY AUTOINCREMENT, |
||||
|
value INTEGER NOT NULL, |
||||
|
ref INTEGER NULL REFERENCES Foo (id), |
||||
|
UNIQUE(value) |
||||
|
);`) |
||||
|
if err != nil { |
||||
|
t.Error(err) |
||||
|
} |
||||
|
|
||||
|
res, err := db.Exec("INSERT INTO Foo (value) VALUES (100);") |
||||
|
if err != nil { |
||||
|
t.Fatalf("Creating first row: %v", err) |
||||
|
} |
||||
|
|
||||
|
id, err := res.LastInsertId() |
||||
|
if err != nil { |
||||
|
t.Fatalf("Retrieving last insert id: %v", err) |
||||
|
} |
||||
|
|
||||
|
_, err = db.Exec("INSERT INTO Foo (ref, value) VALUES (?, 100);", id) |
||||
|
if err == nil { |
||||
|
t.Error("No error!") |
||||
|
} else { |
||||
|
sqliteErr := err.(Error) |
||||
|
if sqliteErr.Code != ErrConstraint { |
||||
|
t.Errorf("Wrong basic error code: %d != %d", |
||||
|
sqliteErr.Code, ErrConstraint) |
||||
|
} |
||||
|
if sqliteErr.ExtendedCode != ErrConstraintUnique { |
||||
|
t.Errorf("Wrong extended error code: %d != %d", |
||||
|
sqliteErr.ExtendedCode, ErrConstraintUnique) |
||||
|
} |
||||
|
extended := sqliteErr.Code.Extend(3).Error() |
||||
|
expected := "constraint failed" |
||||
|
if extended != expected { |
||||
|
t.Errorf("Wrong basic error code: %q != %q", |
||||
|
extended, expected) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
} |
189319 vendor/src/github.com/mattn/go-sqlite3/sqlite3-binding.c
File diff suppressed because it is too large
8733 vendor/src/github.com/mattn/go-sqlite3/sqlite3-binding.h
File diff suppressed because it is too large
1012 vendor/src/github.com/mattn/go-sqlite3/sqlite3.go
File diff suppressed because it is too large
@ -0,0 +1,130 @@ |
|||||
|
// Copyright (C) 2015 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
|
||||
|
//
|
||||
|
// Use of this source code is governed by an MIT-style
|
||||
|
// license that can be found in the LICENSE file.
|
||||
|
|
||||
|
package sqlite3 |
||||
|
|
||||
|
import ( |
||||
|
"database/sql" |
||||
|
"os" |
||||
|
"testing" |
||||
|
) |
||||
|
|
||||
|
func TestFTS3(t *testing.T) { |
||||
|
tempFilename := TempFilename(t) |
||||
|
defer os.Remove(tempFilename) |
||||
|
db, err := sql.Open("sqlite3", tempFilename) |
||||
|
if err != nil { |
||||
|
t.Fatal("Failed to open database:", err) |
||||
|
} |
||||
|
defer db.Close() |
||||
|
|
||||
|
_, err = db.Exec("DROP TABLE foo") |
||||
|
_, err = db.Exec("CREATE VIRTUAL TABLE foo USING fts3(id INTEGER PRIMARY KEY, value TEXT)") |
||||
|
if err != nil { |
||||
|
t.Fatal("Failed to create table:", err) |
||||
|
} |
||||
|
|
||||
|
_, err = db.Exec("INSERT INTO foo(id, value) VALUES(?, ?)", 1, `今日の 晩御飯は 天麩羅よ`) |
||||
|
if err != nil { |
||||
|
t.Fatal("Failed to insert value:", err) |
||||
|
} |
||||
|
|
||||
|
_, err = db.Exec("INSERT INTO foo(id, value) VALUES(?, ?)", 2, `今日は いい 天気だ`) |
||||
|
if err != nil { |
||||
|
t.Fatal("Failed to insert value:", err) |
||||
|
} |
||||
|
|
||||
|
rows, err := db.Query("SELECT id, value FROM foo WHERE value MATCH '今日* 天*'") |
||||
|
if err != nil { |
||||
|
t.Fatal("Unable to query foo table:", err) |
||||
|
} |
||||
|
defer rows.Close() |
||||
|
|
||||
|
for rows.Next() { |
||||
|
var id int |
||||
|
var value string |
||||
|
|
||||
|
if err := rows.Scan(&id, &value); err != nil { |
||||
|
t.Error("Unable to scan results:", err) |
||||
|
continue |
||||
|
} |
||||
|
|
||||
|
if id == 1 && value != `今日の 晩御飯は 天麩羅よ` { |
||||
|
t.Error("Value for id 1 should be `今日の 晩御飯は 天麩羅よ`, but:", value) |
||||
|
} else if id == 2 && value != `今日は いい 天気だ` { |
||||
|
t.Error("Value for id 2 should be `今日は いい 天気だ`, but:", value) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
rows, err = db.Query("SELECT value FROM foo WHERE value MATCH '今日* 天麩羅*'") |
||||
|
if err != nil { |
||||
|
t.Fatal("Unable to query foo table:", err) |
||||
|
} |
||||
|
defer rows.Close() |
||||
|
|
||||
|
var value string |
||||
|
if !rows.Next() { |
||||
|
t.Fatal("Result should be only one") |
||||
|
} |
||||
|
|
||||
|
if err := rows.Scan(&value); err != nil { |
||||
|
t.Fatal("Unable to scan results:", err) |
||||
|
} |
||||
|
|
||||
|
if value != `今日の 晩御飯は 天麩羅よ` { |
||||
|
t.Fatal("Value should be `今日の 晩御飯は 天麩羅よ`, but:", value) |
||||
|
} |
||||
|
|
||||
|
if rows.Next() { |
||||
|
t.Fatal("Result should be only one") |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func TestFTS4(t *testing.T) { |
||||
|
tempFilename := TempFilename(t) |
||||
|
defer os.Remove(tempFilename) |
||||
|
db, err := sql.Open("sqlite3", tempFilename) |
||||
|
if err != nil { |
||||
|
t.Fatal("Failed to open database:", err) |
||||
|
} |
||||
|
defer db.Close() |
||||
|
|
||||
|
_, err = db.Exec("DROP TABLE foo") |
||||
|
_, err = db.Exec("CREATE VIRTUAL TABLE foo USING fts4(tokenize=unicode61, id INTEGER PRIMARY KEY, value TEXT)") |
||||
|
switch { |
||||
|
case err != nil && err.Error() == "unknown tokenizer: unicode61": |
||||
|
t.Skip("FTS4 not supported") |
||||
|
case err != nil: |
||||
|
t.Fatal("Failed to create table:", err) |
||||
|
} |
||||
|
|
||||
|
_, err = db.Exec("INSERT INTO foo(id, value) VALUES(?, ?)", 1, `février`) |
||||
|
if err != nil { |
||||
|
t.Fatal("Failed to insert value:", err) |
||||
|
} |
||||
|
|
||||
|
rows, err := db.Query("SELECT value FROM foo WHERE value MATCH 'fevrier'") |
||||
|
if err != nil { |
||||
|
t.Fatal("Unable to query foo table:", err) |
||||
|
} |
||||
|
defer rows.Close() |
||||
|
|
||||
|
var value string |
||||
|
if !rows.Next() { |
||||
|
t.Fatal("Result should be only one") |
||||
|
} |
||||
|
|
||||
|
if err := rows.Scan(&value); err != nil { |
||||
|
t.Fatal("Unable to scan results:", err) |
||||
|
} |
||||
|
|
||||
|
if value != `février` { |
||||
|
t.Fatal("Value should be `février`, but:", value) |
||||
|
} |
||||
|
|
||||
|
if rows.Next() { |
||||
|
t.Fatal("Result should be only one") |
||||
|
} |
||||
|
} |
@@ -0,0 +1,13 @@
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// +build fts5

package sqlite3

/*
#cgo CFLAGS: -DSQLITE_ENABLE_FTS5
#cgo LDFLAGS: -lm
*/
import "C"
@@ -0,0 +1,13 @@
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// +build icu

package sqlite3

/*
#cgo LDFLAGS: -licuuc -licui18n
#cgo CFLAGS: -DSQLITE_ENABLE_ICU
*/
import "C"
@@ -0,0 +1,12 @@
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// +build json1

package sqlite3

/*
#cgo CFLAGS: -DSQLITE_ENABLE_JSON1
*/
import "C"
@@ -0,0 +1,14 @@
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// +build libsqlite3

package sqlite3

/*
#cgo CFLAGS: -DUSE_LIBSQLITE3
#cgo linux LDFLAGS: -lsqlite3
#cgo darwin LDFLAGS: -L/usr/local/opt/sqlite/lib -lsqlite3
*/
import "C"
@@ -0,0 +1,63 @@
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// +build !sqlite_omit_load_extension

package sqlite3

/*
#include <sqlite3-binding.h>
#include <stdlib.h>
*/
import "C"
import (
	"errors"
	"unsafe"
)

func (c *SQLiteConn) loadExtensions(extensions []string) error {
	rv := C.sqlite3_enable_load_extension(c.db, 1)
	if rv != C.SQLITE_OK {
		return errors.New(C.GoString(C.sqlite3_errmsg(c.db)))
	}

	for _, extension := range extensions {
		cext := C.CString(extension)
		defer C.free(unsafe.Pointer(cext))
		rv = C.sqlite3_load_extension(c.db, cext, nil, nil)
		if rv != C.SQLITE_OK {
			return errors.New(C.GoString(C.sqlite3_errmsg(c.db)))
		}
	}

	rv = C.sqlite3_enable_load_extension(c.db, 0)
	if rv != C.SQLITE_OK {
		return errors.New(C.GoString(C.sqlite3_errmsg(c.db)))
	}
	return nil
}

func (c *SQLiteConn) LoadExtension(lib string, entry string) error {
	rv := C.sqlite3_enable_load_extension(c.db, 1)
	if rv != C.SQLITE_OK {
		return errors.New(C.GoString(C.sqlite3_errmsg(c.db)))
	}

	clib := C.CString(lib)
	defer C.free(unsafe.Pointer(clib))
	centry := C.CString(entry)
	defer C.free(unsafe.Pointer(centry))

	rv = C.sqlite3_load_extension(c.db, clib, centry, nil)
	if rv != C.SQLITE_OK {
		return errors.New(C.GoString(C.sqlite3_errmsg(c.db)))
	}

	rv = C.sqlite3_enable_load_extension(c.db, 0)
	if rv != C.SQLITE_OK {
		return errors.New(C.GoString(C.sqlite3_errmsg(c.db)))
	}

	return nil
}
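LoadExtension above is the exported entry point for loading native SQLite extensions on a connection; a typical place to call it is a ConnectHook, as sketched below. The extension path, entry-point name, and driver name are assumptions for illustration only, not values from this repository.

package main

import (
	"database/sql"
	"log"

	sqlite3 "github.com/mattn/go-sqlite3"
)

func main() {
	// Register a driver whose connections load a hypothetical compiled
	// extension as soon as they are opened.
	sql.Register("sqlite3_with_native_ext", &sqlite3.SQLiteDriver{
		ConnectHook: func(conn *sqlite3.SQLiteConn) error {
			return conn.LoadExtension("./sqlite3_mod_regexp.so", "sqlite3_extension_init")
		},
	})

	db, err := sql.Open("sqlite3_with_native_ext", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// If the extension registered a regexp() function, the REGEXP operator
	// becomes available to queries on this connection.
	var matched int
	if err := db.QueryRow(`SELECT 'golang' REGEXP '^go'`).Scan(&matched); err != nil {
		log.Fatal(err)
	}
	log.Println("matched:", matched)
}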
@@ -0,0 +1,23 @@
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// +build sqlite_omit_load_extension

package sqlite3

/*
#cgo CFLAGS: -DSQLITE_OMIT_LOAD_EXTENSION
*/
import "C"
import (
	"errors"
)

func (c *SQLiteConn) loadExtensions(extensions []string) error {
	return errors.New("Extensions have been disabled for static builds")
}

func (c *SQLiteConn) LoadExtension(lib string, entry string) error {
	return errors.New("Extensions have been disabled for static builds")
}
@@ -0,0 +1,13 @@
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// +build !windows

package sqlite3

/*
#cgo CFLAGS: -I.
#cgo linux LDFLAGS: -ldl
*/
import "C"
1350 vendor/src/github.com/mattn/go-sqlite3/sqlite3_test.go
File diff suppressed because it is too large
@ -0,0 +1,409 @@ |
|||||
|
package sqlite3_test |
||||
|
|
||||
|
import ( |
||||
|
"database/sql" |
||||
|
"fmt" |
||||
|
"math/rand" |
||||
|
"regexp" |
||||
|
"strconv" |
||||
|
"sync" |
||||
|
"testing" |
||||
|
"time" |
||||
|
) |
||||
|
|
||||
|
type Dialect int |
||||
|
|
||||
|
const ( |
||||
|
SQLITE Dialect = iota |
||||
|
POSTGRESQL |
||||
|
MYSQL |
||||
|
) |
||||
|
|
||||
|
type DB struct { |
||||
|
*testing.T |
||||
|
*sql.DB |
||||
|
dialect Dialect |
||||
|
once sync.Once |
||||
|
} |
||||
|
|
||||
|
var db *DB |
||||
|
|
||||
|
// the following tables will be created and dropped during the test
|
||||
|
var testTables = []string{"foo", "bar", "t", "bench"} |
||||
|
|
||||
|
var tests = []testing.InternalTest{ |
||||
|
{"TestBlobs", TestBlobs}, |
||||
|
{"TestManyQueryRow", TestManyQueryRow}, |
||||
|
{"TestTxQuery", TestTxQuery}, |
||||
|
{"TestPreparedStmt", TestPreparedStmt}, |
||||
|
} |
||||
|
|
||||
|
var benchmarks = []testing.InternalBenchmark{ |
||||
|
{"BenchmarkExec", BenchmarkExec}, |
||||
|
{"BenchmarkQuery", BenchmarkQuery}, |
||||
|
{"BenchmarkParams", BenchmarkParams}, |
||||
|
{"BenchmarkStmt", BenchmarkStmt}, |
||||
|
{"BenchmarkRows", BenchmarkRows}, |
||||
|
{"BenchmarkStmtRows", BenchmarkStmtRows}, |
||||
|
} |
||||
|
|
||||
|
// RunTests runs the SQL test suite
|
||||
|
func RunTests(t *testing.T, d *sql.DB, dialect Dialect) { |
||||
|
db = &DB{t, d, dialect, sync.Once{}} |
||||
|
testing.RunTests(func(string, string) (bool, error) { return true, nil }, tests) |
||||
|
|
||||
|
if !testing.Short() { |
||||
|
for _, b := range benchmarks { |
||||
|
fmt.Printf("%-20s", b.Name) |
||||
|
r := testing.Benchmark(b.F) |
||||
|
fmt.Printf("%10d %10.0f req/s\n", r.N, float64(r.N)/r.T.Seconds()) |
||||
|
} |
||||
|
} |
||||
|
db.tearDown() |
||||
|
} |
||||
|
|
||||
|
func (db *DB) mustExec(sql string, args ...interface{}) sql.Result { |
||||
|
res, err := db.Exec(sql, args...) |
||||
|
if err != nil { |
||||
|
db.Fatalf("Error running %q: %v", sql, err) |
||||
|
} |
||||
|
return res |
||||
|
} |
||||
|
|
||||
|
func (db *DB) tearDown() { |
||||
|
for _, tbl := range testTables { |
||||
|
switch db.dialect { |
||||
|
case SQLITE: |
||||
|
db.mustExec("drop table if exists " + tbl) |
||||
|
case MYSQL, POSTGRESQL: |
||||
|
db.mustExec("drop table if exists " + tbl) |
||||
|
default: |
||||
|
db.Fatal("unkown dialect") |
||||
|
} |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
// q replaces ? parameters if needed
|
||||
|
func (db *DB) q(sql string) string { |
||||
|
switch db.dialect { |
||||
|
case POSTGRESQL: // replace with $1, $2, ..
|
||||
|
qrx := regexp.MustCompile(`\?`) |
||||
|
n := 0 |
||||
|
return qrx.ReplaceAllStringFunc(sql, func(string) string { |
||||
|
n++ |
||||
|
return "$" + strconv.Itoa(n) |
||||
|
}) |
||||
|
} |
||||
|
return sql |
||||
|
} |
||||
|
|
||||
|
func (db *DB) blobType(size int) string { |
||||
|
switch db.dialect { |
||||
|
case SQLITE: |
||||
|
return fmt.Sprintf("blob[%d]", size) |
||||
|
case POSTGRESQL: |
||||
|
return "bytea" |
||||
|
case MYSQL: |
||||
|
return fmt.Sprintf("VARBINARY(%d)", size) |
||||
|
} |
||||
|
panic("unkown dialect") |
||||
|
} |
||||
|
|
||||
|
func (db *DB) serialPK() string { |
||||
|
switch db.dialect { |
||||
|
case SQLITE: |
||||
|
return "integer primary key autoincrement" |
||||
|
case POSTGRESQL: |
||||
|
return "serial primary key" |
||||
|
case MYSQL: |
||||
|
return "integer primary key auto_increment" |
||||
|
} |
||||
|
panic("unkown dialect") |
||||
|
} |
||||
|
|
||||
|
func (db *DB) now() string { |
||||
|
switch db.dialect { |
||||
|
case SQLITE: |
||||
|
return "datetime('now')" |
||||
|
case POSTGRESQL: |
||||
|
return "now()" |
||||
|
case MYSQL: |
||||
|
return "now()" |
||||
|
} |
||||
|
panic("unkown dialect") |
||||
|
} |
||||
|
|
||||
|
func makeBench() { |
||||
|
if _, err := db.Exec("create table bench (n varchar(32), i integer, d double, s varchar(32), t datetime)"); err != nil { |
||||
|
panic(err) |
||||
|
} |
||||
|
st, err := db.Prepare("insert into bench values (?, ?, ?, ?, ?)") |
||||
|
if err != nil { |
||||
|
panic(err) |
||||
|
} |
||||
|
defer st.Close() |
||||
|
for i := 0; i < 100; i++ { |
||||
|
if _, err = st.Exec(nil, i, float64(i), fmt.Sprintf("%d", i), time.Now()); err != nil { |
||||
|
panic(err) |
||||
|
} |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func TestResult(t *testing.T) { |
||||
|
db.tearDown() |
||||
|
db.mustExec("create temporary table test (id " + db.serialPK() + ", name varchar(10))") |
||||
|
|
||||
|
for i := 1; i < 3; i++ { |
||||
|
r := db.mustExec(db.q("insert into test (name) values (?)"), fmt.Sprintf("row %d", i)) |
||||
|
n, err := r.RowsAffected() |
||||
|
if err != nil { |
||||
|
t.Fatal(err) |
||||
|
} |
||||
|
if n != 1 { |
||||
|
t.Errorf("got %v, want %v", n, 1) |
||||
|
} |
||||
|
n, err = r.LastInsertId() |
||||
|
if err != nil { |
||||
|
t.Fatal(err) |
||||
|
} |
||||
|
if n != int64(i) { |
||||
|
t.Errorf("got %v, want %v", n, i) |
||||
|
} |
||||
|
} |
||||
|
if _, err := db.Exec("error!"); err == nil { |
||||
|
t.Fatalf("expected error") |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func TestBlobs(t *testing.T) { |
||||
|
db.tearDown() |
||||
|
var blob = []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15} |
||||
|
db.mustExec("create table foo (id integer primary key, bar " + db.blobType(16) + ")") |
||||
|
db.mustExec(db.q("insert into foo (id, bar) values(?,?)"), 0, blob) |
||||
|
|
||||
|
want := fmt.Sprintf("%x", blob) |
||||
|
|
||||
|
b := make([]byte, 16) |
||||
|
err := db.QueryRow(db.q("select bar from foo where id = ?"), 0).Scan(&b) |
||||
|
got := fmt.Sprintf("%x", b) |
||||
|
if err != nil { |
||||
|
t.Errorf("[]byte scan: %v", err) |
||||
|
} else if got != want { |
||||
|
t.Errorf("for []byte, got %q; want %q", got, want) |
||||
|
} |
||||
|
|
||||
|
err = db.QueryRow(db.q("select bar from foo where id = ?"), 0).Scan(&got) |
||||
|
want = string(blob) |
||||
|
if err != nil { |
||||
|
t.Errorf("string scan: %v", err) |
||||
|
} else if got != want { |
||||
|
t.Errorf("for string, got %q; want %q", got, want) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func TestManyQueryRow(t *testing.T) { |
||||
|
if testing.Short() { |
||||
|
t.Log("skipping in short mode") |
||||
|
return |
||||
|
} |
||||
|
db.tearDown() |
||||
|
db.mustExec("create table foo (id integer primary key, name varchar(50))") |
||||
|
db.mustExec(db.q("insert into foo (id, name) values(?,?)"), 1, "bob") |
||||
|
var name string |
||||
|
for i := 0; i < 10000; i++ { |
||||
|
err := db.QueryRow(db.q("select name from foo where id = ?"), 1).Scan(&name) |
||||
|
if err != nil || name != "bob" { |
||||
|
t.Fatalf("on query %d: err=%v, name=%q", i, err, name) |
||||
|
} |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func TestTxQuery(t *testing.T) { |
||||
|
db.tearDown() |
||||
|
tx, err := db.Begin() |
||||
|
if err != nil { |
||||
|
t.Fatal(err) |
||||
|
} |
||||
|
defer tx.Rollback() |
||||
|
|
||||
|
_, err = tx.Exec("create table foo (id integer primary key, name varchar(50))") |
||||
|
if err != nil { |
||||
|
t.Fatal(err) |
||||
|
} |
||||
|
|
||||
|
_, err = tx.Exec(db.q("insert into foo (id, name) values(?,?)"), 1, "bob") |
||||
|
if err != nil { |
||||
|
t.Fatal(err) |
||||
|
} |
||||
|
|
||||
|
r, err := tx.Query(db.q("select name from foo where id = ?"), 1) |
||||
|
if err != nil { |
||||
|
t.Fatal(err) |
||||
|
} |
||||
|
defer r.Close() |
||||
|
|
||||
|
if !r.Next() { |
||||
|
if r.Err() != nil { |
||||
|
t.Fatal(err) |
||||
|
} |
||||
|
t.Fatal("expected one rows") |
||||
|
} |
||||
|
|
||||
|
var name string |
||||
|
err = r.Scan(&name) |
||||
|
if err != nil { |
||||
|
t.Fatal(err) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func TestPreparedStmt(t *testing.T) { |
||||
|
db.tearDown() |
||||
|
db.mustExec("CREATE TABLE t (count INT)") |
||||
|
sel, err := db.Prepare("SELECT count FROM t ORDER BY count DESC") |
||||
|
if err != nil { |
||||
|
t.Fatalf("prepare 1: %v", err) |
||||
|
} |
||||
|
ins, err := db.Prepare(db.q("INSERT INTO t (count) VALUES (?)")) |
||||
|
if err != nil { |
||||
|
t.Fatalf("prepare 2: %v", err) |
||||
|
} |
||||
|
|
||||
|
for n := 1; n <= 3; n++ { |
||||
|
if _, err := ins.Exec(n); err != nil { |
||||
|
t.Fatalf("insert(%d) = %v", n, err) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
const nRuns = 10 |
||||
|
var wg sync.WaitGroup |
||||
|
for i := 0; i < nRuns; i++ { |
||||
|
wg.Add(1) |
||||
|
go func() { |
||||
|
defer wg.Done() |
||||
|
for j := 0; j < 10; j++ { |
||||
|
count := 0 |
||||
|
if err := sel.QueryRow().Scan(&count); err != nil && err != sql.ErrNoRows { |
||||
|
t.Errorf("Query: %v", err) |
||||
|
return |
||||
|
} |
||||
|
if _, err := ins.Exec(rand.Intn(100)); err != nil { |
||||
|
t.Errorf("Insert: %v", err) |
||||
|
return |
||||
|
} |
||||
|
} |
||||
|
}() |
||||
|
} |
||||
|
wg.Wait() |
||||
|
} |
||||
|
|
||||
|
// Benchmarks need to use panic() since b.Error errors are lost when
|
||||
|
// running via testing.Benchmark() I would like to run these via go
|
||||
|
// test -bench but calling Benchmark() from a benchmark test
|
||||
|
// currently hangs go.
|
||||
|
|
||||
|
func BenchmarkExec(b *testing.B) { |
||||
|
for i := 0; i < b.N; i++ { |
||||
|
if _, err := db.Exec("select 1"); err != nil { |
||||
|
panic(err) |
||||
|
} |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func BenchmarkQuery(b *testing.B) { |
||||
|
for i := 0; i < b.N; i++ { |
||||
|
var n sql.NullString |
||||
|
var i int |
||||
|
var f float64 |
||||
|
var s string |
||||
|
// var t time.Time
|
||||
|
if err := db.QueryRow("select null, 1, 1.1, 'foo'").Scan(&n, &i, &f, &s); err != nil { |
||||
|
panic(err) |
||||
|
} |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func BenchmarkParams(b *testing.B) { |
||||
|
for i := 0; i < b.N; i++ { |
||||
|
var n sql.NullString |
||||
|
var i int |
||||
|
var f float64 |
||||
|
var s string |
||||
|
// var t time.Time
|
||||
|
if err := db.QueryRow("select ?, ?, ?, ?", nil, 1, 1.1, "foo").Scan(&n, &i, &f, &s); err != nil { |
||||
|
panic(err) |
||||
|
} |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func BenchmarkStmt(b *testing.B) { |
||||
|
st, err := db.Prepare("select ?, ?, ?, ?") |
||||
|
if err != nil { |
||||
|
panic(err) |
||||
|
} |
||||
|
defer st.Close() |
||||
|
|
||||
|
for n := 0; n < b.N; n++ { |
||||
|
var n sql.NullString |
||||
|
var i int |
||||
|
var f float64 |
||||
|
var s string |
||||
|
// var t time.Time
|
||||
|
if err := st.QueryRow(nil, 1, 1.1, "foo").Scan(&n, &i, &f, &s); err != nil { |
||||
|
panic(err) |
||||
|
} |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func BenchmarkRows(b *testing.B) { |
||||
|
db.once.Do(makeBench) |
||||
|
|
||||
|
for n := 0; n < b.N; n++ { |
||||
|
var n sql.NullString |
||||
|
var i int |
||||
|
var f float64 |
||||
|
var s string |
||||
|
var t time.Time |
||||
|
r, err := db.Query("select * from bench") |
||||
|
if err != nil { |
||||
|
panic(err) |
||||
|
} |
||||
|
for r.Next() { |
||||
|
if err = r.Scan(&n, &i, &f, &s, &t); err != nil { |
||||
|
panic(err) |
||||
|
} |
||||
|
} |
||||
|
if err = r.Err(); err != nil { |
||||
|
panic(err) |
||||
|
} |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func BenchmarkStmtRows(b *testing.B) { |
||||
|
db.once.Do(makeBench) |
||||
|
|
||||
|
st, err := db.Prepare("select * from bench") |
||||
|
if err != nil { |
||||
|
panic(err) |
||||
|
} |
||||
|
defer st.Close() |
||||
|
|
||||
|
for n := 0; n < b.N; n++ { |
||||
|
var n sql.NullString |
||||
|
var i int |
||||
|
var f float64 |
||||
|
var s string |
||||
|
var t time.Time |
||||
|
r, err := st.Query() |
||||
|
if err != nil { |
||||
|
panic(err) |
||||
|
} |
||||
|
for r.Next() { |
||||
|
if err = r.Scan(&n, &i, &f, &s, &t); err != nil { |
||||
|
panic(err) |
||||
|
} |
||||
|
} |
||||
|
if err = r.Err(); err != nil { |
||||
|
panic(err) |
||||
|
} |
||||
|
} |
||||
|
} |
@@ -0,0 +1,14 @@
// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// +build windows

package sqlite3

/*
#cgo CFLAGS: -I. -fno-stack-check -fno-stack-protector -mno-stack-arg-probe
#cgo windows,386 CFLAGS: -D_USE_32BIT_TIME_T
#cgo LDFLAGS: -lmingwex -lmingw32
*/
import "C"
@ -0,0 +1,546 @@ |
|||||
|
/* |
||||
|
** 2006 June 7 |
||||
|
** |
||||
|
** The author disclaims copyright to this source code. In place of |
||||
|
** a legal notice, here is a blessing: |
||||
|
** |
||||
|
** May you do good and not evil. |
||||
|
** May you find forgiveness for yourself and forgive others. |
||||
|
** May you share freely, never taking more than you give. |
||||
|
** |
||||
|
************************************************************************* |
||||
|
** This header file defines the SQLite interface for use by |
||||
|
** shared libraries that want to be imported as extensions into |
||||
|
** an SQLite instance. Shared libraries that intend to be loaded |
||||
|
** as extensions by SQLite should #include this file instead of |
||||
|
** sqlite3.h. |
||||
|
*/ |
||||
|
#ifndef _SQLITE3EXT_H_ |
||||
|
#define _SQLITE3EXT_H_ |
||||
|
#include "sqlite3-binding.h" |
||||
|
|
||||
|
typedef struct sqlite3_api_routines sqlite3_api_routines; |
||||
|
|
||||
|
/* |
||||
|
** The following structure holds pointers to all of the SQLite API |
||||
|
** routines. |
||||
|
** |
||||
|
** WARNING: In order to maintain backwards compatibility, add new |
||||
|
** interfaces to the end of this structure only. If you insert new |
||||
|
** interfaces in the middle of this structure, then older different |
||||
|
** versions of SQLite will not be able to load each other's shared |
||||
|
** libraries! |
||||
|
*/ |
||||
|
struct sqlite3_api_routines { |
||||
|
void * (*aggregate_context)(sqlite3_context*,int nBytes); |
||||
|
int (*aggregate_count)(sqlite3_context*); |
||||
|
int (*bind_blob)(sqlite3_stmt*,int,const void*,int n,void(*)(void*)); |
||||
|
int (*bind_double)(sqlite3_stmt*,int,double); |
||||
|
int (*bind_int)(sqlite3_stmt*,int,int); |
||||
|
int (*bind_int64)(sqlite3_stmt*,int,sqlite_int64); |
||||
|
int (*bind_null)(sqlite3_stmt*,int); |
||||
|
int (*bind_parameter_count)(sqlite3_stmt*); |
||||
|
int (*bind_parameter_index)(sqlite3_stmt*,const char*zName); |
||||
|
const char * (*bind_parameter_name)(sqlite3_stmt*,int); |
||||
|
int (*bind_text)(sqlite3_stmt*,int,const char*,int n,void(*)(void*)); |
||||
|
int (*bind_text16)(sqlite3_stmt*,int,const void*,int,void(*)(void*)); |
||||
|
int (*bind_value)(sqlite3_stmt*,int,const sqlite3_value*); |
||||
|
int (*busy_handler)(sqlite3*,int(*)(void*,int),void*); |
||||
|
int (*busy_timeout)(sqlite3*,int ms); |
||||
|
int (*changes)(sqlite3*); |
||||
|
int (*close)(sqlite3*); |
||||
|
int (*collation_needed)(sqlite3*,void*,void(*)(void*,sqlite3*, |
||||
|
int eTextRep,const char*)); |
||||
|
int (*collation_needed16)(sqlite3*,void*,void(*)(void*,sqlite3*, |
||||
|
int eTextRep,const void*)); |
||||
|
const void * (*column_blob)(sqlite3_stmt*,int iCol); |
||||
|
int (*column_bytes)(sqlite3_stmt*,int iCol); |
||||
|
int (*column_bytes16)(sqlite3_stmt*,int iCol); |
||||
|
int (*column_count)(sqlite3_stmt*pStmt); |
||||
|
const char * (*column_database_name)(sqlite3_stmt*,int); |
||||
|
const void * (*column_database_name16)(sqlite3_stmt*,int); |
||||
|
const char * (*column_decltype)(sqlite3_stmt*,int i); |
||||
|
const void * (*column_decltype16)(sqlite3_stmt*,int); |
||||
|
double (*column_double)(sqlite3_stmt*,int iCol); |
||||
|
int (*column_int)(sqlite3_stmt*,int iCol); |
||||
|
sqlite_int64 (*column_int64)(sqlite3_stmt*,int iCol); |
||||
|
const char * (*column_name)(sqlite3_stmt*,int); |
||||
|
const void * (*column_name16)(sqlite3_stmt*,int); |
||||
|
const char * (*column_origin_name)(sqlite3_stmt*,int); |
||||
|
const void * (*column_origin_name16)(sqlite3_stmt*,int); |
||||
|
const char * (*column_table_name)(sqlite3_stmt*,int); |
||||
|
const void * (*column_table_name16)(sqlite3_stmt*,int); |
||||
|
const unsigned char * (*column_text)(sqlite3_stmt*,int iCol); |
||||
|
const void * (*column_text16)(sqlite3_stmt*,int iCol); |
||||
|
int (*column_type)(sqlite3_stmt*,int iCol); |
||||
|
sqlite3_value* (*column_value)(sqlite3_stmt*,int iCol); |
||||
|
void * (*commit_hook)(sqlite3*,int(*)(void*),void*); |
||||
|
int (*complete)(const char*sql); |
||||
|
int (*complete16)(const void*sql); |
||||
|
int (*create_collation)(sqlite3*,const char*,int,void*, |
||||
|
int(*)(void*,int,const void*,int,const void*)); |
||||
|
int (*create_collation16)(sqlite3*,const void*,int,void*, |
||||
|
int(*)(void*,int,const void*,int,const void*)); |
||||
|
int (*create_function)(sqlite3*,const char*,int,int,void*, |
||||
|
void (*xFunc)(sqlite3_context*,int,sqlite3_value**), |
||||
|
void (*xStep)(sqlite3_context*,int,sqlite3_value**), |
||||
|
void (*xFinal)(sqlite3_context*)); |
||||
|
int (*create_function16)(sqlite3*,const void*,int,int,void*, |
||||
|
void (*xFunc)(sqlite3_context*,int,sqlite3_value**), |
||||
|
void (*xStep)(sqlite3_context*,int,sqlite3_value**), |
||||
|
void (*xFinal)(sqlite3_context*)); |
||||
|
int (*create_module)(sqlite3*,const char*,const sqlite3_module*,void*); |
||||
|
int (*data_count)(sqlite3_stmt*pStmt); |
||||
|
sqlite3 * (*db_handle)(sqlite3_stmt*); |
||||
|
int (*declare_vtab)(sqlite3*,const char*); |
||||
|
int (*enable_shared_cache)(int); |
||||
|
int (*errcode)(sqlite3*db); |
||||
|
const char * (*errmsg)(sqlite3*); |
||||
|
const void * (*errmsg16)(sqlite3*); |
||||
|
int (*exec)(sqlite3*,const char*,sqlite3_callback,void*,char**); |
||||
|
int (*expired)(sqlite3_stmt*); |
||||
|
int (*finalize)(sqlite3_stmt*pStmt); |
||||
|
void (*free)(void*); |
||||
|
void (*free_table)(char**result); |
||||
|
int (*get_autocommit)(sqlite3*); |
||||
|
void * (*get_auxdata)(sqlite3_context*,int); |
||||
|
int (*get_table)(sqlite3*,const char*,char***,int*,int*,char**); |
||||
|
int (*global_recover)(void); |
||||
|
void (*interruptx)(sqlite3*); |
||||
|
sqlite_int64 (*last_insert_rowid)(sqlite3*); |
||||
|
const char * (*libversion)(void); |
||||
|
int (*libversion_number)(void); |
||||
|
void *(*malloc)(int); |
||||
|
char * (*mprintf)(const char*,...); |
||||
|
int (*open)(const char*,sqlite3**); |
||||
|
int (*open16)(const void*,sqlite3**); |
||||
|
int (*prepare)(sqlite3*,const char*,int,sqlite3_stmt**,const char**); |
||||
|
int (*prepare16)(sqlite3*,const void*,int,sqlite3_stmt**,const void**); |
||||
|
void * (*profile)(sqlite3*,void(*)(void*,const char*,sqlite_uint64),void*); |
||||
|
void (*progress_handler)(sqlite3*,int,int(*)(void*),void*); |
||||
|
void *(*realloc)(void*,int); |
||||
|
int (*reset)(sqlite3_stmt*pStmt); |
||||
|
void (*result_blob)(sqlite3_context*,const void*,int,void(*)(void*)); |
||||
|
void (*result_double)(sqlite3_context*,double); |
||||
|
void (*result_error)(sqlite3_context*,const char*,int); |
||||
|
void (*result_error16)(sqlite3_context*,const void*,int); |
||||
|
void (*result_int)(sqlite3_context*,int); |
||||
|
void (*result_int64)(sqlite3_context*,sqlite_int64); |
||||
|
void (*result_null)(sqlite3_context*); |
||||
|
void (*result_text)(sqlite3_context*,const char*,int,void(*)(void*)); |
||||
|
void (*result_text16)(sqlite3_context*,const void*,int,void(*)(void*)); |
||||
|
void (*result_text16be)(sqlite3_context*,const void*,int,void(*)(void*)); |
||||
|
void (*result_text16le)(sqlite3_context*,const void*,int,void(*)(void*)); |
||||
|
void (*result_value)(sqlite3_context*,sqlite3_value*); |
||||
|
void * (*rollback_hook)(sqlite3*,void(*)(void*),void*); |
||||
|
int (*set_authorizer)(sqlite3*,int(*)(void*,int,const char*,const char*, |
||||
|
const char*,const char*),void*); |
||||
|
void (*set_auxdata)(sqlite3_context*,int,void*,void (*)(void*)); |
||||
|
char * (*snprintf)(int,char*,const char*,...); |
||||
|
int (*step)(sqlite3_stmt*); |
||||
|
int (*table_column_metadata)(sqlite3*,const char*,const char*,const char*, |
||||
|
char const**,char const**,int*,int*,int*); |
||||
|
void (*thread_cleanup)(void); |
||||
|
int (*total_changes)(sqlite3*); |
||||
|
void * (*trace)(sqlite3*,void(*xTrace)(void*,const char*),void*); |
||||
|
int (*transfer_bindings)(sqlite3_stmt*,sqlite3_stmt*); |
||||
|
void * (*update_hook)(sqlite3*,void(*)(void*,int ,char const*,char const*, |
||||
|
sqlite_int64),void*); |
||||
|
void * (*user_data)(sqlite3_context*); |
||||
|
const void * (*value_blob)(sqlite3_value*); |
||||
|
int (*value_bytes)(sqlite3_value*); |
||||
|
int (*value_bytes16)(sqlite3_value*); |
||||
|
double (*value_double)(sqlite3_value*); |
||||
|
int (*value_int)(sqlite3_value*); |
||||
|
sqlite_int64 (*value_int64)(sqlite3_value*); |
||||
|
int (*value_numeric_type)(sqlite3_value*); |
||||
|
const unsigned char * (*value_text)(sqlite3_value*); |
||||
|
const void * (*value_text16)(sqlite3_value*); |
||||
|
const void * (*value_text16be)(sqlite3_value*); |
||||
|
const void * (*value_text16le)(sqlite3_value*); |
||||
|
int (*value_type)(sqlite3_value*); |
||||
|
char *(*vmprintf)(const char*,va_list); |
||||
|
/* Added ??? */ |
||||
|
int (*overload_function)(sqlite3*, const char *zFuncName, int nArg); |
||||
|
/* Added by 3.3.13 */ |
||||
|
int (*prepare_v2)(sqlite3*,const char*,int,sqlite3_stmt**,const char**); |
||||
|
int (*prepare16_v2)(sqlite3*,const void*,int,sqlite3_stmt**,const void**); |
||||
|
int (*clear_bindings)(sqlite3_stmt*); |
||||
|
/* Added by 3.4.1 */ |
||||
|
int (*create_module_v2)(sqlite3*,const char*,const sqlite3_module*,void*, |
||||
|
void (*xDestroy)(void *)); |
||||
|
/* Added by 3.5.0 */ |
||||
|
int (*bind_zeroblob)(sqlite3_stmt*,int,int); |
||||
|
int (*blob_bytes)(sqlite3_blob*); |
||||
|
int (*blob_close)(sqlite3_blob*); |
||||
|
int (*blob_open)(sqlite3*,const char*,const char*,const char*,sqlite3_int64, |
||||
|
int,sqlite3_blob**); |
||||
|
int (*blob_read)(sqlite3_blob*,void*,int,int); |
||||
|
int (*blob_write)(sqlite3_blob*,const void*,int,int); |
||||
|
int (*create_collation_v2)(sqlite3*,const char*,int,void*, |
||||
|
int(*)(void*,int,const void*,int,const void*), |
||||
|
void(*)(void*)); |
||||
|
int (*file_control)(sqlite3*,const char*,int,void*); |
||||
|
sqlite3_int64 (*memory_highwater)(int); |
||||
|
sqlite3_int64 (*memory_used)(void); |
||||
|
sqlite3_mutex *(*mutex_alloc)(int); |
||||
|
void (*mutex_enter)(sqlite3_mutex*); |
||||
|
void (*mutex_free)(sqlite3_mutex*); |
||||
|
void (*mutex_leave)(sqlite3_mutex*); |
||||
|
int (*mutex_try)(sqlite3_mutex*); |
||||
|
int (*open_v2)(const char*,sqlite3**,int,const char*); |
||||
|
int (*release_memory)(int); |
||||
|
void (*result_error_nomem)(sqlite3_context*); |
||||
|
void (*result_error_toobig)(sqlite3_context*); |
||||
|
int (*sleep)(int); |
||||
|
void (*soft_heap_limit)(int); |
||||
|
sqlite3_vfs *(*vfs_find)(const char*); |
||||
|
int (*vfs_register)(sqlite3_vfs*,int); |
||||
|
int (*vfs_unregister)(sqlite3_vfs*); |
||||
|
int (*xthreadsafe)(void); |
||||
|
void (*result_zeroblob)(sqlite3_context*,int); |
||||
|
void (*result_error_code)(sqlite3_context*,int); |
||||
|
int (*test_control)(int, ...); |
||||
|
void (*randomness)(int,void*); |
||||
|
sqlite3 *(*context_db_handle)(sqlite3_context*); |
||||
|
int (*extended_result_codes)(sqlite3*,int); |
||||
|
int (*limit)(sqlite3*,int,int); |
||||
|
sqlite3_stmt *(*next_stmt)(sqlite3*,sqlite3_stmt*); |
||||
|
const char *(*sql)(sqlite3_stmt*); |
||||
|
int (*status)(int,int*,int*,int); |
||||
|
int (*backup_finish)(sqlite3_backup*); |
||||
|
sqlite3_backup *(*backup_init)(sqlite3*,const char*,sqlite3*,const char*); |
||||
|
int (*backup_pagecount)(sqlite3_backup*); |
||||
|
int (*backup_remaining)(sqlite3_backup*); |
||||
|
int (*backup_step)(sqlite3_backup*,int); |
||||
|
const char *(*compileoption_get)(int); |
||||
|
int (*compileoption_used)(const char*); |
||||
|
int (*create_function_v2)(sqlite3*,const char*,int,int,void*, |
||||
|
void (*xFunc)(sqlite3_context*,int,sqlite3_value**), |
||||
|
void (*xStep)(sqlite3_context*,int,sqlite3_value**), |
||||
|
void (*xFinal)(sqlite3_context*), |
||||
|
void(*xDestroy)(void*)); |
||||
|
int (*db_config)(sqlite3*,int,...); |
||||
|
sqlite3_mutex *(*db_mutex)(sqlite3*); |
||||
|
int (*db_status)(sqlite3*,int,int*,int*,int); |
||||
|
int (*extended_errcode)(sqlite3*); |
||||
|
void (*log)(int,const char*,...); |
||||
|
sqlite3_int64 (*soft_heap_limit64)(sqlite3_int64); |
||||
|
const char *(*sourceid)(void); |
||||
|
int (*stmt_status)(sqlite3_stmt*,int,int); |
||||
|
int (*strnicmp)(const char*,const char*,int); |
||||
|
int (*unlock_notify)(sqlite3*,void(*)(void**,int),void*); |
||||
|
int (*wal_autocheckpoint)(sqlite3*,int); |
||||
|
int (*wal_checkpoint)(sqlite3*,const char*); |
||||
|
void *(*wal_hook)(sqlite3*,int(*)(void*,sqlite3*,const char*,int),void*); |
||||
|
int (*blob_reopen)(sqlite3_blob*,sqlite3_int64); |
||||
|
int (*vtab_config)(sqlite3*,int op,...); |
||||
|
int (*vtab_on_conflict)(sqlite3*); |
||||
|
/* Version 3.7.16 and later */ |
||||
|
int (*close_v2)(sqlite3*); |
||||
|
const char *(*db_filename)(sqlite3*,const char*); |
||||
|
int (*db_readonly)(sqlite3*,const char*); |
||||
|
int (*db_release_memory)(sqlite3*); |
||||
|
const char *(*errstr)(int); |
||||
|
int (*stmt_busy)(sqlite3_stmt*); |
||||
|
int (*stmt_readonly)(sqlite3_stmt*); |
||||
|
int (*stricmp)(const char*,const char*); |
||||
|
int (*uri_boolean)(const char*,const char*,int); |
||||
|
sqlite3_int64 (*uri_int64)(const char*,const char*,sqlite3_int64); |
||||
|
const char *(*uri_parameter)(const char*,const char*); |
||||
|
char *(*vsnprintf)(int,char*,const char*,va_list); |
||||
|
int (*wal_checkpoint_v2)(sqlite3*,const char*,int,int*,int*); |
||||
|
/* Version 3.8.7 and later */ |
||||
|
int (*auto_extension)(void(*)(void)); |
||||
|
int (*bind_blob64)(sqlite3_stmt*,int,const void*,sqlite3_uint64, |
||||
|
void(*)(void*)); |
||||
|
int (*bind_text64)(sqlite3_stmt*,int,const char*,sqlite3_uint64, |
||||
|
void(*)(void*),unsigned char); |
||||
|
int (*cancel_auto_extension)(void(*)(void)); |
||||
|
int (*load_extension)(sqlite3*,const char*,const char*,char**); |
||||
|
void *(*malloc64)(sqlite3_uint64); |
||||
|
sqlite3_uint64 (*msize)(void*); |
||||
|
void *(*realloc64)(void*,sqlite3_uint64); |
||||
|
void (*reset_auto_extension)(void); |
||||
|
void (*result_blob64)(sqlite3_context*,const void*,sqlite3_uint64, |
||||
|
void(*)(void*)); |
||||
|
void (*result_text64)(sqlite3_context*,const char*,sqlite3_uint64, |
||||
|
void(*)(void*), unsigned char); |
||||
|
int (*strglob)(const char*,const char*); |
||||
|
/* Version 3.8.11 and later */ |
||||
|
sqlite3_value *(*value_dup)(const sqlite3_value*); |
||||
|
void (*value_free)(sqlite3_value*); |
||||
|
int (*result_zeroblob64)(sqlite3_context*,sqlite3_uint64); |
||||
|
int (*bind_zeroblob64)(sqlite3_stmt*, int, sqlite3_uint64); |
||||
|
/* Version 3.9.0 and later */ |
||||
|
unsigned int (*value_subtype)(sqlite3_value*); |
||||
|
void (*result_subtype)(sqlite3_context*,unsigned int); |
||||
|
/* Version 3.10.0 and later */ |
||||
|
int (*status64)(int,sqlite3_int64*,sqlite3_int64*,int); |
||||
|
int (*strlike)(const char*,const char*,unsigned int); |
||||
|
int (*db_cacheflush)(sqlite3*); |
||||
|
/* Version 3.12.0 and later */ |
||||
|
int (*system_errno)(sqlite3*); |
||||
|
}; |
||||
|
|
||||
|
/* |
||||
|
** The following macros redefine the API routines so that they are |
||||
|
** redirected through the global sqlite3_api structure. |
||||
|
** |
||||
|
** This header file is also used by the loadext.c source file |
||||
|
** (part of the main SQLite library - not an extension) so that |
||||
|
** it can get access to the sqlite3_api_routines structure |
||||
|
** definition. But the main library does not want to redefine |
||||
|
** the API. So the redefinition macros are only valid if the |
||||
|
** SQLITE_CORE macros is undefined. |
||||
|
*/ |
||||
|
#if !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION) |
||||
|
#define sqlite3_aggregate_context sqlite3_api->aggregate_context |
||||
|
#ifndef SQLITE_OMIT_DEPRECATED |
||||
|
#define sqlite3_aggregate_count sqlite3_api->aggregate_count |
||||
|
#endif |
||||
|
#define sqlite3_bind_blob sqlite3_api->bind_blob |
||||
|
#define sqlite3_bind_double sqlite3_api->bind_double |
||||
|
#define sqlite3_bind_int sqlite3_api->bind_int |
||||
|
#define sqlite3_bind_int64 sqlite3_api->bind_int64 |
||||
|
#define sqlite3_bind_null sqlite3_api->bind_null |
||||
|
#define sqlite3_bind_parameter_count sqlite3_api->bind_parameter_count |
||||
|
#define sqlite3_bind_parameter_index sqlite3_api->bind_parameter_index |
||||
|
#define sqlite3_bind_parameter_name sqlite3_api->bind_parameter_name |
||||
|
#define sqlite3_bind_text sqlite3_api->bind_text |
||||
|
#define sqlite3_bind_text16 sqlite3_api->bind_text16 |
||||
|
#define sqlite3_bind_value sqlite3_api->bind_value |
||||
|
#define sqlite3_busy_handler sqlite3_api->busy_handler |
||||
|
#define sqlite3_busy_timeout sqlite3_api->busy_timeout |
||||
|
#define sqlite3_changes sqlite3_api->changes |
||||
|
#define sqlite3_close sqlite3_api->close |
||||
|
#define sqlite3_collation_needed sqlite3_api->collation_needed |
||||
|
#define sqlite3_collation_needed16 sqlite3_api->collation_needed16 |
||||
|
#define sqlite3_column_blob sqlite3_api->column_blob |
||||
|
#define sqlite3_column_bytes sqlite3_api->column_bytes |
||||
|
#define sqlite3_column_bytes16 sqlite3_api->column_bytes16 |
||||
|
#define sqlite3_column_count sqlite3_api->column_count |
||||
|
#define sqlite3_column_database_name sqlite3_api->column_database_name |
||||
|
#define sqlite3_column_database_name16 sqlite3_api->column_database_name16 |
||||
|
#define sqlite3_column_decltype sqlite3_api->column_decltype |
||||
|
#define sqlite3_column_decltype16 sqlite3_api->column_decltype16 |
||||
|
#define sqlite3_column_double sqlite3_api->column_double |
||||
|
#define sqlite3_column_int sqlite3_api->column_int |
||||
|
#define sqlite3_column_int64 sqlite3_api->column_int64 |
||||
|
#define sqlite3_column_name sqlite3_api->column_name |
||||
|
#define sqlite3_column_name16 sqlite3_api->column_name16 |
||||
|
#define sqlite3_column_origin_name sqlite3_api->column_origin_name |
||||
|
#define sqlite3_column_origin_name16 sqlite3_api->column_origin_name16 |
||||
|
#define sqlite3_column_table_name sqlite3_api->column_table_name |
||||
|
#define sqlite3_column_table_name16 sqlite3_api->column_table_name16 |
||||
|
#define sqlite3_column_text sqlite3_api->column_text |
||||
|
#define sqlite3_column_text16 sqlite3_api->column_text16 |
||||
|
#define sqlite3_column_type sqlite3_api->column_type |
||||
|
#define sqlite3_column_value sqlite3_api->column_value |
||||
|
#define sqlite3_commit_hook sqlite3_api->commit_hook |
||||
|
#define sqlite3_complete sqlite3_api->complete |
||||
|
#define sqlite3_complete16 sqlite3_api->complete16 |
||||
|
#define sqlite3_create_collation sqlite3_api->create_collation |
||||
|
#define sqlite3_create_collation16 sqlite3_api->create_collation16 |
||||
|
#define sqlite3_create_function sqlite3_api->create_function |
||||
|
#define sqlite3_create_function16 sqlite3_api->create_function16 |
||||
|
#define sqlite3_create_module sqlite3_api->create_module |
||||
|
#define sqlite3_create_module_v2 sqlite3_api->create_module_v2 |
||||
|
#define sqlite3_data_count sqlite3_api->data_count |
||||
|
#define sqlite3_db_handle sqlite3_api->db_handle |
||||
|
#define sqlite3_declare_vtab sqlite3_api->declare_vtab |
||||
|
#define sqlite3_enable_shared_cache sqlite3_api->enable_shared_cache |
||||
|
#define sqlite3_errcode sqlite3_api->errcode |
||||
|
#define sqlite3_errmsg sqlite3_api->errmsg |
||||
|
#define sqlite3_errmsg16 sqlite3_api->errmsg16 |
||||
|
#define sqlite3_exec sqlite3_api->exec |
||||
|
#ifndef SQLITE_OMIT_DEPRECATED |
||||
|
#define sqlite3_expired sqlite3_api->expired |
||||
|
#endif |
||||
|
#define sqlite3_finalize sqlite3_api->finalize |
||||
|
#define sqlite3_free sqlite3_api->free |
||||
|
#define sqlite3_free_table sqlite3_api->free_table |
||||
|
#define sqlite3_get_autocommit sqlite3_api->get_autocommit |
||||
|
#define sqlite3_get_auxdata sqlite3_api->get_auxdata |
||||
|
#define sqlite3_get_table sqlite3_api->get_table |
||||
|
#ifndef SQLITE_OMIT_DEPRECATED |
||||
|
#define sqlite3_global_recover sqlite3_api->global_recover |
||||
|
#endif |
||||
|
#define sqlite3_interrupt sqlite3_api->interruptx |
||||
|
#define sqlite3_last_insert_rowid sqlite3_api->last_insert_rowid |
||||
|
#define sqlite3_libversion sqlite3_api->libversion |
||||
|
#define sqlite3_libversion_number sqlite3_api->libversion_number |
||||
|
#define sqlite3_malloc sqlite3_api->malloc |
||||
|
#define sqlite3_mprintf sqlite3_api->mprintf |
||||
|
#define sqlite3_open sqlite3_api->open |
||||
|
#define sqlite3_open16 sqlite3_api->open16 |
||||
|
#define sqlite3_prepare sqlite3_api->prepare |
||||
|
#define sqlite3_prepare16 sqlite3_api->prepare16 |
||||
|
#define sqlite3_prepare_v2 sqlite3_api->prepare_v2 |
||||
|
#define sqlite3_prepare16_v2 sqlite3_api->prepare16_v2 |
||||
|
#define sqlite3_profile sqlite3_api->profile |
||||
|
#define sqlite3_progress_handler sqlite3_api->progress_handler |
||||
|
#define sqlite3_realloc sqlite3_api->realloc |
||||
|
#define sqlite3_reset sqlite3_api->reset |
||||
|
#define sqlite3_result_blob sqlite3_api->result_blob |
||||
|
#define sqlite3_result_double sqlite3_api->result_double |
||||
|
#define sqlite3_result_error sqlite3_api->result_error |
||||
|
#define sqlite3_result_error16 sqlite3_api->result_error16 |
||||
|
#define sqlite3_result_int sqlite3_api->result_int |
||||
|
#define sqlite3_result_int64 sqlite3_api->result_int64 |
||||
|
#define sqlite3_result_null sqlite3_api->result_null |
||||
|
#define sqlite3_result_text sqlite3_api->result_text |
||||
|
#define sqlite3_result_text16 sqlite3_api->result_text16 |
||||
|
#define sqlite3_result_text16be sqlite3_api->result_text16be |
||||
|
#define sqlite3_result_text16le sqlite3_api->result_text16le |
||||
|
#define sqlite3_result_value sqlite3_api->result_value |
||||
|
#define sqlite3_rollback_hook sqlite3_api->rollback_hook |
||||
|
#define sqlite3_set_authorizer sqlite3_api->set_authorizer |
||||
|
#define sqlite3_set_auxdata sqlite3_api->set_auxdata |
||||
|
#define sqlite3_snprintf sqlite3_api->snprintf |
||||
|
#define sqlite3_step sqlite3_api->step |
||||
|
#define sqlite3_table_column_metadata sqlite3_api->table_column_metadata |
||||
|
#define sqlite3_thread_cleanup sqlite3_api->thread_cleanup |
||||
|
#define sqlite3_total_changes sqlite3_api->total_changes |
||||
|
#define sqlite3_trace sqlite3_api->trace |
||||
|
#ifndef SQLITE_OMIT_DEPRECATED |
||||
|
#define sqlite3_transfer_bindings sqlite3_api->transfer_bindings |
||||
|
#endif |
||||
|
#define sqlite3_update_hook sqlite3_api->update_hook |
||||
|
#define sqlite3_user_data sqlite3_api->user_data |
||||
|
#define sqlite3_value_blob sqlite3_api->value_blob |
||||
|
#define sqlite3_value_bytes sqlite3_api->value_bytes |
||||
|
#define sqlite3_value_bytes16 sqlite3_api->value_bytes16 |
||||
|
#define sqlite3_value_double sqlite3_api->value_double |
||||
|
#define sqlite3_value_int sqlite3_api->value_int |
||||
|
#define sqlite3_value_int64 sqlite3_api->value_int64 |
||||
|
#define sqlite3_value_numeric_type sqlite3_api->value_numeric_type |
||||
|
#define sqlite3_value_text sqlite3_api->value_text |
||||
|
#define sqlite3_value_text16 sqlite3_api->value_text16 |
||||
|
#define sqlite3_value_text16be sqlite3_api->value_text16be |
||||
|
#define sqlite3_value_text16le sqlite3_api->value_text16le |
||||
|
#define sqlite3_value_type sqlite3_api->value_type |
||||
|
#define sqlite3_vmprintf sqlite3_api->vmprintf |
||||
|
#define sqlite3_vsnprintf sqlite3_api->vsnprintf |
||||
|
#define sqlite3_overload_function sqlite3_api->overload_function |
||||
|
#define sqlite3_prepare_v2 sqlite3_api->prepare_v2 |
||||
|
#define sqlite3_prepare16_v2 sqlite3_api->prepare16_v2 |
||||
|
#define sqlite3_clear_bindings sqlite3_api->clear_bindings |
||||
|
#define sqlite3_bind_zeroblob sqlite3_api->bind_zeroblob |
||||
|
#define sqlite3_blob_bytes sqlite3_api->blob_bytes |
||||
|
#define sqlite3_blob_close sqlite3_api->blob_close |
||||
|
#define sqlite3_blob_open sqlite3_api->blob_open |
||||
|
#define sqlite3_blob_read sqlite3_api->blob_read |
||||
|
#define sqlite3_blob_write sqlite3_api->blob_write |
||||
|
#define sqlite3_create_collation_v2 sqlite3_api->create_collation_v2 |
||||
|
#define sqlite3_file_control sqlite3_api->file_control |
||||
|
#define sqlite3_memory_highwater sqlite3_api->memory_highwater |
||||
|
#define sqlite3_memory_used sqlite3_api->memory_used |
||||
|
#define sqlite3_mutex_alloc sqlite3_api->mutex_alloc |
||||
|
#define sqlite3_mutex_enter sqlite3_api->mutex_enter |
||||
|
#define sqlite3_mutex_free sqlite3_api->mutex_free |
||||
|
#define sqlite3_mutex_leave sqlite3_api->mutex_leave |
||||
|
#define sqlite3_mutex_try sqlite3_api->mutex_try |
||||
|
#define sqlite3_open_v2 sqlite3_api->open_v2 |
||||
|
#define sqlite3_release_memory sqlite3_api->release_memory |
||||
|
#define sqlite3_result_error_nomem sqlite3_api->result_error_nomem |
||||
|
#define sqlite3_result_error_toobig sqlite3_api->result_error_toobig |
||||
|
#define sqlite3_sleep sqlite3_api->sleep |
||||
|
#define sqlite3_soft_heap_limit sqlite3_api->soft_heap_limit |
||||
|
#define sqlite3_vfs_find sqlite3_api->vfs_find |
||||
|
#define sqlite3_vfs_register sqlite3_api->vfs_register |
||||
|
#define sqlite3_vfs_unregister sqlite3_api->vfs_unregister |
||||
|
#define sqlite3_threadsafe sqlite3_api->xthreadsafe |
||||
|
#define sqlite3_result_zeroblob sqlite3_api->result_zeroblob |
||||
|
#define sqlite3_result_error_code sqlite3_api->result_error_code |
||||
|
#define sqlite3_test_control sqlite3_api->test_control |
||||
|
#define sqlite3_randomness sqlite3_api->randomness |
||||
|
#define sqlite3_context_db_handle sqlite3_api->context_db_handle |
||||
|
#define sqlite3_extended_result_codes sqlite3_api->extended_result_codes |
||||
|
#define sqlite3_limit sqlite3_api->limit |
||||
|
#define sqlite3_next_stmt sqlite3_api->next_stmt |
||||
|
#define sqlite3_sql sqlite3_api->sql |
||||
|
#define sqlite3_status sqlite3_api->status |
||||
|
#define sqlite3_backup_finish sqlite3_api->backup_finish |
||||
|
#define sqlite3_backup_init sqlite3_api->backup_init |
||||
|
#define sqlite3_backup_pagecount sqlite3_api->backup_pagecount |
||||
|
#define sqlite3_backup_remaining sqlite3_api->backup_remaining |
||||
|
#define sqlite3_backup_step sqlite3_api->backup_step |
||||
|
#define sqlite3_compileoption_get sqlite3_api->compileoption_get |
||||
|
#define sqlite3_compileoption_used sqlite3_api->compileoption_used |
||||
|
#define sqlite3_create_function_v2 sqlite3_api->create_function_v2 |
||||
|
#define sqlite3_db_config sqlite3_api->db_config |
||||
|
#define sqlite3_db_mutex sqlite3_api->db_mutex |
||||
|
#define sqlite3_db_status sqlite3_api->db_status |
||||
|
#define sqlite3_extended_errcode sqlite3_api->extended_errcode |
||||
|
#define sqlite3_log sqlite3_api->log |
||||
|
#define sqlite3_soft_heap_limit64 sqlite3_api->soft_heap_limit64 |
||||
|
#define sqlite3_sourceid sqlite3_api->sourceid |
||||
|
#define sqlite3_stmt_status sqlite3_api->stmt_status |
||||
|
#define sqlite3_strnicmp sqlite3_api->strnicmp |
||||
|
#define sqlite3_unlock_notify sqlite3_api->unlock_notify |
||||
|
#define sqlite3_wal_autocheckpoint sqlite3_api->wal_autocheckpoint |
||||
|
#define sqlite3_wal_checkpoint sqlite3_api->wal_checkpoint |
||||
|
#define sqlite3_wal_hook sqlite3_api->wal_hook |
||||
|
#define sqlite3_blob_reopen sqlite3_api->blob_reopen |
||||
|
#define sqlite3_vtab_config sqlite3_api->vtab_config |
||||
|
#define sqlite3_vtab_on_conflict sqlite3_api->vtab_on_conflict |
||||
|
/* Version 3.7.16 and later */ |
||||
|
#define sqlite3_close_v2 sqlite3_api->close_v2 |
||||
|
#define sqlite3_db_filename sqlite3_api->db_filename |
||||
|
#define sqlite3_db_readonly sqlite3_api->db_readonly |
||||
|
#define sqlite3_db_release_memory sqlite3_api->db_release_memory |
||||
|
#define sqlite3_errstr sqlite3_api->errstr |
||||
|
#define sqlite3_stmt_busy sqlite3_api->stmt_busy |
||||
|
#define sqlite3_stmt_readonly sqlite3_api->stmt_readonly |
||||
|
#define sqlite3_stricmp sqlite3_api->stricmp |
||||
|
#define sqlite3_uri_boolean sqlite3_api->uri_boolean |
||||
|
#define sqlite3_uri_int64 sqlite3_api->uri_int64 |
||||
|
#define sqlite3_uri_parameter sqlite3_api->uri_parameter |
||||
|
#define sqlite3_uri_vsnprintf sqlite3_api->vsnprintf |
||||
|
#define sqlite3_wal_checkpoint_v2 sqlite3_api->wal_checkpoint_v2 |
||||
|
/* Version 3.8.7 and later */ |
||||
|
#define sqlite3_auto_extension sqlite3_api->auto_extension |
||||
|
#define sqlite3_bind_blob64 sqlite3_api->bind_blob64 |
||||
|
#define sqlite3_bind_text64 sqlite3_api->bind_text64 |
||||
|
#define sqlite3_cancel_auto_extension sqlite3_api->cancel_auto_extension |
||||
|
#define sqlite3_load_extension sqlite3_api->load_extension |
||||
|
#define sqlite3_malloc64 sqlite3_api->malloc64 |
||||
|
#define sqlite3_msize sqlite3_api->msize |
||||
|
#define sqlite3_realloc64 sqlite3_api->realloc64 |
||||
|
#define sqlite3_reset_auto_extension sqlite3_api->reset_auto_extension |
||||
|
#define sqlite3_result_blob64 sqlite3_api->result_blob64 |
||||
|
#define sqlite3_result_text64 sqlite3_api->result_text64 |
||||
|
#define sqlite3_strglob sqlite3_api->strglob |
||||
|
/* Version 3.8.11 and later */ |
||||
|
#define sqlite3_value_dup sqlite3_api->value_dup |
||||
|
#define sqlite3_value_free sqlite3_api->value_free |
||||
|
#define sqlite3_result_zeroblob64 sqlite3_api->result_zeroblob64 |
||||
|
#define sqlite3_bind_zeroblob64 sqlite3_api->bind_zeroblob64 |
||||
|
/* Version 3.9.0 and later */ |
||||
|
#define sqlite3_value_subtype sqlite3_api->value_subtype |
||||
|
#define sqlite3_result_subtype sqlite3_api->result_subtype |
||||
|
/* Version 3.10.0 and later */ |
||||
|
#define sqlite3_status64 sqlite3_api->status64 |
||||
|
#define sqlite3_strlike sqlite3_api->strlike |
||||
|
#define sqlite3_db_cacheflush sqlite3_api->db_cacheflush |
||||
|
/* Version 3.12.0 and later */ |
||||
|
#define sqlite3_system_errno sqlite3_api->system_errno |
||||
|
#endif /* !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION) */ |
||||
|
|
||||
|
#if !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION) |
||||
|
/* This case when the file really is being compiled as a loadable |
||||
|
** extension */ |
||||
|
# define SQLITE_EXTENSION_INIT1 const sqlite3_api_routines *sqlite3_api=0; |
||||
|
# define SQLITE_EXTENSION_INIT2(v) sqlite3_api=v; |
||||
|
# define SQLITE_EXTENSION_INIT3 \ |
||||
|
extern const sqlite3_api_routines *sqlite3_api; |
||||
|
#else |
||||
|
/* This case when the file is being statically linked into the |
||||
|
** application */ |
||||
|
# define SQLITE_EXTENSION_INIT1 /*no-op*/ |
||||
|
# define SQLITE_EXTENSION_INIT2(v) (void)v; /* unused parameter */ |
||||
|
# define SQLITE_EXTENSION_INIT3 /*no-op*/ |
||||
|
#endif |
||||
|
|
||||
|
#endif /* _SQLITE3EXT_H_ */ |
@@ -0,0 +1,27 @@
|||||
|
Copyright (c) 2014 The Gobrake Authors. All rights reserved. |
||||
|
|
||||
|
Redistribution and use in source and binary forms, with or without |
||||
|
modification, are permitted provided that the following conditions are |
||||
|
met: |
||||
|
|
||||
|
* Redistributions of source code must retain the above copyright |
||||
|
notice, this list of conditions and the following disclaimer. |
||||
|
* Redistributions in binary form must reproduce the above |
||||
|
copyright notice, this list of conditions and the following disclaimer |
||||
|
in the documentation and/or other materials provided with the |
||||
|
distribution. |
||||
|
* Neither the name of Google Inc. nor the names of its |
||||
|
contributors may be used to endorse or promote products derived from |
||||
|
this software without specific prior written permission. |
||||
|
|
||||
|
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
||||
|
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
||||
|
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
||||
|
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
||||
|
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
||||
|
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
||||
|
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
||||
|
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
||||
|
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
||||
|
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
||||
|
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
@@ -0,0 +1,47 @@
|||||
|
# Airbrake Golang Notifier [![Build Status](https://circleci.com/gh/airbrake/gobrake.png?circle-token=4cbcbf1a58fa8275217247351a2db7250c1ef976)](https://circleci.com/gh/airbrake/gobrake)

<img src="http://f.cl.ly/items/3J3h1L05222X3o1w2l2L/golang.jpg" width=800px>

# Example

```go
package main

import (
    "errors"
    "time"

    "gopkg.in/airbrake/gobrake.v2"
)

var airbrake = gobrake.NewNotifier(1234567, "FIXME")

func init() {
    airbrake.AddFilter(func(notice *gobrake.Notice) *gobrake.Notice {
        notice.Context["environment"] = "production"
        return notice
    })
}

func main() {
    defer airbrake.WaitAndClose(5 * time.Second)
    defer airbrake.NotifyOnPanic()

    airbrake.Notify(errors.New("operation failed"), nil)
}
```

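If you need to block until Airbrake has accepted a notice, the notifier also exposes `Notice` and `SendNotice` directly. A minimal sketch building on the `airbrake` notifier above; the `report` helper, its depth argument and the log messages are illustrative, and `log` and `net/http` would need to be added to the imports:

```go
// report sends a notice synchronously, so the caller sees the send error
// and the id Airbrake assigned to the notice.
func report(err error, req *http.Request) {
    notice := airbrake.Notice(err, req, 1)
    id, sendErr := airbrake.SendNotice(notice)
    if sendErr != nil {
        log.Printf("airbrake: sending notice failed: %v", sendErr)
        return
    }
    log.Printf("airbrake: created notice %s", id)
}
```
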
## Ignoring notices

```go
airbrake.AddFilter(func(notice *gobrake.Notice) *gobrake.Notice {
    if notice.Context["environment"] == "development" {
        // Ignore notices in development environment.
        return nil
    }
    return notice
})
```

## Logging

You can use the [glog fork](https://github.com/airbrake/glog) to send your logs to Airbrake.
@@ -0,0 +1,37 @@
|||||
|
package gobrake_test |
||||
|
|
||||
|
import ( |
||||
|
"errors" |
||||
|
"net/http" |
||||
|
"net/http/httptest" |
||||
|
"testing" |
||||
|
|
||||
|
"gopkg.in/airbrake/gobrake.v2" |
||||
|
) |
||||
|
|
||||
|
func BenchmarkSendNotice(b *testing.B) { |
||||
|
handler := func(w http.ResponseWriter, req *http.Request) { |
||||
|
w.WriteHeader(http.StatusCreated) |
||||
|
w.Write([]byte(`{"id":"123"}`)) |
||||
|
} |
||||
|
server := httptest.NewServer(http.HandlerFunc(handler)) |
||||
|
|
||||
|
notifier := gobrake.NewNotifier(1, "key") |
||||
|
notifier.SetHost(server.URL) |
||||
|
|
||||
|
notice := notifier.Notice(errors.New("benchmark"), nil, 0) |
||||
|
|
||||
|
b.ResetTimer() |
||||
|
|
||||
|
b.RunParallel(func(pb *testing.PB) { |
||||
|
for pb.Next() { |
||||
|
id, err := notifier.SendNotice(notice) |
||||
|
if err != nil { |
||||
|
b.Fatal(err) |
||||
|
} |
||||
|
if id != "123" { |
||||
|
b.Fatalf("got %q, wanted 123", id) |
||||
|
} |
||||
|
} |
||||
|
}) |
||||
|
} |
@@ -0,0 +1,14 @@
|||||
|
checkout: |
||||
|
post: |
||||
|
- rm -rf /home/ubuntu/.go_workspace/src/gopkg.in/airbrake/gobrake.v2 |
||||
|
- mkdir -p /home/ubuntu/.go_workspace/src/gopkg.in/airbrake |
||||
|
- mv /home/ubuntu/gobrake /home/ubuntu/.go_workspace/src/gopkg.in/airbrake/gobrake.v2 |
||||
|
|
||||
|
dependencies: |
||||
|
override: |
||||
|
- go get github.com/onsi/ginkgo |
||||
|
- go get github.com/onsi/gomega |
||||
|
|
||||
|
test: |
||||
|
override: |
||||
|
- go test gopkg.in/airbrake/gobrake.v2 |
@@ -0,0 +1,16 @@
|||||
|
package gobrake |
||||
|
|
||||
|
import ( |
||||
|
"log" |
||||
|
"os" |
||||
|
) |
||||
|
|
||||
|
var logger *log.Logger |
||||
|
|
||||
|
func init() { |
||||
|
SetLogger(log.New(os.Stderr, "gobrake: ", log.LstdFlags)) |
||||
|
} |
||||
|
|
||||
|
func SetLogger(l *log.Logger) { |
||||
|
logger = l |
||||
|
} |
@@ -0,0 +1,78 @@
|||||
|
package gobrake |
||||
|
|
||||
|
import ( |
||||
|
"fmt" |
||||
|
"net/http" |
||||
|
) |
||||
|
|
||||
|
type Error struct { |
||||
|
Type string `json:"type"` |
||||
|
Message string `json:"message"` |
||||
|
Backtrace []StackFrame `json:"backtrace"` |
||||
|
} |
||||
|
|
||||
|
type Notice struct { |
||||
|
Errors []Error `json:"errors"` |
||||
|
Context map[string]interface{} `json:"context"` |
||||
|
Env map[string]interface{} `json:"environment"` |
||||
|
Session map[string]interface{} `json:"session"` |
||||
|
Params map[string]interface{} `json:"params"` |
||||
|
} |
||||
|
|
||||
|
func (n *Notice) String() string { |
||||
|
if len(n.Errors) == 0 { |
||||
|
// Avoid fmt.Sprint(n) here: it would call String again and recurse without bound.
return "Notice<no errors>"
||||
|
} |
||||
|
e := n.Errors[0] |
||||
|
return fmt.Sprintf("%s: %s", e.Type, e.Message) |
||||
|
} |
||||
|
|
||||
|
func NewNotice(e interface{}, req *http.Request, depth int) *Notice { |
||||
|
stack := stack(depth) |
||||
|
notice := &Notice{ |
||||
|
Errors: []Error{ |
||||
|
{ |
||||
|
Type: fmt.Sprintf("%T", e), |
||||
|
Message: fmt.Sprint(e), |
||||
|
Backtrace: stack, |
||||
|
}, |
||||
|
}, |
||||
|
Context: map[string]interface{}{ |
||||
|
"notifier": map[string]interface{}{ |
||||
|
"name": "gobrake", |
||||
|
"version": "2.0.3", |
||||
|
"url": "https://github.com/airbrake/gobrake", |
||||
|
}, |
||||
|
}, |
||||
|
Env: map[string]interface{}{}, |
||||
|
Session: map[string]interface{}{}, |
||||
|
Params: map[string]interface{}{}, |
||||
|
} |
||||
|
|
||||
|
if req != nil { |
||||
|
notice.Context["url"] = req.URL.String() |
||||
|
if ua := req.Header.Get("User-Agent"); ua != "" { |
||||
|
notice.Context["userAgent"] = ua |
||||
|
} |
||||
|
|
||||
|
for k, v := range req.Header { |
||||
|
if len(v) == 1 { |
||||
|
notice.Env[k] = v[0] |
||||
|
} else { |
||||
|
notice.Env[k] = v |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
if err := req.ParseForm(); err == nil { |
||||
|
for k, v := range req.Form { |
||||
|
if len(v) == 1 { |
||||
|
notice.Params[k] = v[0] |
||||
|
} else { |
||||
|
notice.Params[k] = v |
||||
|
} |
||||
|
} |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
return notice |
||||
|
} |
@@ -0,0 +1,238 @@
|||||
|
package gobrake // import "gopkg.in/airbrake/gobrake.v2"
|
||||
|
|
||||
|
import ( |
||||
|
"bytes" |
||||
|
"crypto/tls" |
||||
|
"encoding/json" |
||||
|
"errors" |
||||
|
"fmt" |
||||
|
"net" |
||||
|
"net/http" |
||||
|
"os" |
||||
|
"runtime" |
||||
|
"sync" |
||||
|
"time" |
||||
|
) |
||||
|
|
||||
|
const defaultAirbrakeHost = "https://airbrake.io" |
||||
|
|
||||
|
const statusTooManyRequests = 429 |
||||
|
|
||||
|
var ( |
||||
|
errClosed = errors.New("gobrake: notifier is closed") |
||||
|
errRateLimited = errors.New("gobrake: you are rate limited") |
||||
|
) |
||||
|
|
||||
|
var httpClient = &http.Client{ |
||||
|
Transport: &http.Transport{ |
||||
|
Proxy: http.ProxyFromEnvironment, |
||||
|
Dial: (&net.Dialer{ |
||||
|
Timeout: 15 * time.Second, |
||||
|
KeepAlive: 30 * time.Second, |
||||
|
}).Dial, |
||||
|
TLSHandshakeTimeout: 10 * time.Second, |
||||
|
TLSClientConfig: &tls.Config{ |
||||
|
ClientSessionCache: tls.NewLRUClientSessionCache(1024), |
||||
|
}, |
||||
|
MaxIdleConnsPerHost: 10, |
||||
|
ResponseHeaderTimeout: 10 * time.Second, |
||||
|
}, |
||||
|
Timeout: 10 * time.Second, |
||||
|
} |
||||
|
|
||||
|
var buffers = sync.Pool{ |
||||
|
New: func() interface{} { |
||||
|
return new(bytes.Buffer) |
||||
|
}, |
||||
|
} |
||||
|
|
||||
|
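// filter can rewrite a notice before it is sent, or drop it entirely by returning nil.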
type filter func(*Notice) *Notice |
||||
|
|
||||
|
type Notifier struct { |
||||
|
// Client is the http.Client used to interact with the Airbrake API.
|
||||
|
Client *http.Client |
||||
|
|
||||
|
projectId int64 |
||||
|
projectKey string |
||||
|
createNoticeURL string |
||||
|
|
||||
|
context map[string]string |
||||
|
filters []filter |
||||
|
|
||||
|
wg sync.WaitGroup |
||||
|
noticeCh chan *Notice |
||||
|
closed chan struct{} |
||||
|
} |
||||
|
|
||||
|
func NewNotifier(projectId int64, projectKey string) *Notifier { |
||||
|
n := &Notifier{ |
||||
|
projectId: projectId, |
||||
|
projectKey: projectKey, |
||||
|
createNoticeURL: getCreateNoticeURL(defaultAirbrakeHost, projectId, projectKey), |
||||
|
|
||||
|
Client: httpClient, |
||||
|
|
||||
|
context: map[string]string{ |
||||
|
"language": runtime.Version(), |
||||
|
"os": runtime.GOOS, |
||||
|
"architecture": runtime.GOARCH, |
||||
|
}, |
||||
|
|
||||
|
noticeCh: make(chan *Notice, 1000), |
||||
|
closed: make(chan struct{}), |
||||
|
} |
||||
|
if hostname, err := os.Hostname(); err == nil { |
||||
|
n.context["hostname"] = hostname |
||||
|
} |
||||
|
if wd, err := os.Getwd(); err == nil { |
||||
|
n.context["rootDirectory"] = wd |
||||
|
} |
||||
|
for i := 0; i < 10; i++ { |
||||
|
go n.worker() |
||||
|
} |
||||
|
return n |
||||
|
} |
||||
|
|
||||
|
// SetHost sets the Airbrake host name. The default is https://airbrake.io.
|
||||
|
func (n *Notifier) SetHost(h string) { |
||||
|
n.createNoticeURL = getCreateNoticeURL(h, n.projectId, n.projectKey) |
||||
|
} |
||||
|
|
||||
|
// AddFilter adds a filter that can modify or ignore a notice.
|
||||
|
func (n *Notifier) AddFilter(fn filter) { |
||||
|
n.filters = append(n.filters, fn) |
||||
|
} |
||||
|
|
||||
|
// Notify notifies Airbrake about the error.
|
||||
|
func (n *Notifier) Notify(e interface{}, req *http.Request) { |
||||
|
notice := n.Notice(e, req, 1) |
||||
|
n.SendNoticeAsync(notice) |
||||
|
} |
||||
|
|
||||
|
// Notice returns an Airbrake notice created from the error and request. depth
|
||||
|
// determines which call frame to use when constructing backtrace.
|
||||
|
func (n *Notifier) Notice(err interface{}, req *http.Request, depth int) *Notice { |
||||
|
notice := NewNotice(err, req, depth+3) |
||||
|
for k, v := range n.context { |
||||
|
notice.Context[k] = v |
||||
|
} |
||||
|
return notice |
||||
|
} |
||||
|
|
||||
|
type sendResponse struct { |
||||
|
Id string `json:"id"` |
||||
|
} |
||||
|
|
||||
|
// SendNotice sends notice to Airbrake.
|
||||
|
func (n *Notifier) SendNotice(notice *Notice) (string, error) { |
||||
|
for _, fn := range n.filters { |
||||
|
notice = fn(notice) |
||||
|
if notice == nil { |
||||
|
// Notice is ignored.
|
||||
|
return "", nil |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
buf := buffers.Get().(*bytes.Buffer) |
||||
|
defer buffers.Put(buf) |
||||
|
|
||||
|
buf.Reset() |
||||
|
if err := json.NewEncoder(buf).Encode(notice); err != nil { |
||||
|
return "", err |
||||
|
} |
||||
|
|
||||
|
resp, err := n.Client.Post(n.createNoticeURL, "application/json", buf) |
||||
|
if err != nil { |
||||
|
return "", err |
||||
|
} |
||||
|
defer resp.Body.Close() |
||||
|
|
||||
|
buf.Reset() |
||||
|
_, err = buf.ReadFrom(resp.Body) |
||||
|
if err != nil { |
||||
|
return "", err |
||||
|
} |
||||
|
|
||||
|
if resp.StatusCode != http.StatusCreated { |
||||
|
if resp.StatusCode == statusTooManyRequests { |
||||
|
return "", errRateLimited |
||||
|
} |
||||
|
err := fmt.Errorf("gobrake: got response code=%d, wanted 201 CREATED", resp.StatusCode) |
||||
|
return "", err |
||||
|
} |
||||
|
|
||||
|
var sendResp sendResponse |
||||
|
err = json.NewDecoder(buf).Decode(&sendResp) |
||||
|
if err != nil { |
||||
|
return "", err |
||||
|
} |
||||
|
|
||||
|
return sendResp.Id, nil |
||||
|
} |
||||
|
|
||||
|
// SendNoticeAsync acts as SendNotice, but sends the notice asynchronously;
// pending notices can be waited on with WaitAndClose.
|
||||
|
func (n *Notifier) SendNoticeAsync(notice *Notice) { |
||||
|
n.wg.Add(1) |
||||
|
select { |
||||
|
case n.noticeCh <- notice: |
||||
|
default: |
||||
|
n.wg.Done() |
||||
|
logger.Printf( |
||||
|
"notice=%q is ignored, because queue is full (len=%d)", |
||||
|
notice, len(n.noticeCh), |
||||
|
) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
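// worker receives notices from noticeCh and reports them to Airbrake until the notifier is closed.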
func (n *Notifier) worker() { |
||||
|
for { |
||||
|
select { |
||||
|
case notice := <-n.noticeCh: |
||||
|
if _, err := n.SendNotice(notice); err != nil && err != errRateLimited { |
||||
|
logger.Printf("gobrake failed reporting notice=%q: error=%q", notice, err) |
||||
|
} |
||||
|
n.wg.Done() |
||||
|
case <-n.closed: |
||||
|
return |
||||
|
} |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
// NotifyOnPanic notifies Airbrake about the panic and should be used
|
||||
|
// with defer statement.
|
||||
|
func (n *Notifier) NotifyOnPanic() { |
||||
|
if v := recover(); v != nil { |
||||
|
notice := n.Notice(v, nil, 3) |
||||
|
n.SendNotice(notice) |
||||
|
panic(v) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
// Flush does nothing.
|
||||
|
//
|
||||
|
// Deprecated. Use WaitAndClose instead.
|
||||
|
func (n *Notifier) Flush() {} |
||||
|
|
||||
|
// WaitAndClose waits for pending requests to finish and then closes the notifier.
|
||||
|
func (n *Notifier) WaitAndClose(timeout time.Duration) error { |
||||
|
done := make(chan struct{}) |
||||
|
go func() { |
||||
|
n.wg.Wait() |
||||
|
close(done) |
||||
|
}() |
||||
|
select { |
||||
|
case <-done: |
||||
|
case <-time.After(timeout): |
||||
|
} |
||||
|
|
||||
|
close(n.closed) |
||||
|
return nil |
||||
|
} |
||||
|
|
||||
|
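// getCreateNoticeURL builds the Airbrake v3 create-notice endpoint URL for the given host, project and key.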
func getCreateNoticeURL(host string, projectId int64, key string) string { |
||||
|
return fmt.Sprintf( |
||||
|
"%s/api/v3/projects/%d/notices?key=%s", |
||||
|
host, projectId, key, |
||||
|
) |
||||
|
} |
@@ -0,0 +1,136 @@
|||||
|
package gobrake_test |
||||
|
|
||||
|
import ( |
||||
|
"encoding/json" |
||||
|
"io/ioutil" |
||||
|
"net/http" |
||||
|
"net/http/httptest" |
||||
|
"net/url" |
||||
|
"os" |
||||
|
"runtime" |
||||
|
"testing" |
||||
|
|
||||
|
. "github.com/onsi/ginkgo" |
||||
|
. "github.com/onsi/gomega" |
||||
|
|
||||
|
"gopkg.in/airbrake/gobrake.v2" |
||||
|
) |
||||
|
|
||||
|
func TestGobrake(t *testing.T) { |
||||
|
RegisterFailHandler(Fail) |
||||
|
RunSpecs(t, "gobrake") |
||||
|
} |
||||
|
|
||||
|
var _ = Describe("Notifier", func() { |
||||
|
var notifier *gobrake.Notifier |
||||
|
var sentNotice *gobrake.Notice |
||||
|
|
||||
|
notify := func(e interface{}, req *http.Request) { |
||||
|
notifier.Notify(e, req) |
||||
|
notifier.Flush() |
||||
|
} |
||||
|
|
||||
|
BeforeEach(func() { |
||||
|
handler := func(w http.ResponseWriter, req *http.Request) { |
||||
|
b, err := ioutil.ReadAll(req.Body) |
||||
|
if err != nil { |
||||
|
panic(err) |
||||
|
} |
||||
|
|
||||
|
sentNotice = &gobrake.Notice{} |
||||
|
err = json.Unmarshal(b, sentNotice) |
||||
|
Expect(err).To(BeNil()) |
||||
|
|
||||
|
w.WriteHeader(http.StatusCreated) |
||||
|
w.Write([]byte(`{"id":"123"}`)) |
||||
|
} |
||||
|
server := httptest.NewServer(http.HandlerFunc(handler)) |
||||
|
|
||||
|
notifier = gobrake.NewNotifier(1, "key") |
||||
|
notifier.SetHost(server.URL) |
||||
|
}) |
||||
|
|
||||
|
It("reports error and backtrace", func() { |
||||
|
notify("hello", nil) |
||||
|
|
||||
|
e := sentNotice.Errors[0] |
||||
|
Expect(e.Type).To(Equal("string")) |
||||
|
Expect(e.Message).To(Equal("hello")) |
||||
|
Expect(e.Backtrace[0].File).To(ContainSubstring("notifier_test.go")) |
||||
|
}) |
||||
|
|
||||
|
It("Notice returns proper backtrace", func() { |
||||
|
notice := notifier.Notice("hello", nil, 0) |
||||
|
|
||||
|
e := notice.Errors[0] |
||||
|
Expect(e.Backtrace[0].File).To(ContainSubstring("notifier_test.go")) |
||||
|
}) |
||||
|
|
||||
|
It("reports context, env, session and params", func() { |
||||
|
wanted := notifier.Notice("hello", nil, 3) |
||||
|
wanted.Context["context1"] = "context1" |
||||
|
wanted.Env["env1"] = "value1" |
||||
|
wanted.Session["session1"] = "value1" |
||||
|
wanted.Params["param1"] = "value1" |
||||
|
|
||||
|
id, err := notifier.SendNotice(wanted) |
||||
|
Expect(err).To(BeNil()) |
||||
|
Expect(id).To(Equal("123")) |
||||
|
|
||||
|
Expect(sentNotice).To(Equal(wanted)) |
||||
|
}) |
||||
|
|
||||
|
It("reports context using SetContext", func() { |
||||
|
notifier.AddFilter(func(notice *gobrake.Notice) *gobrake.Notice { |
||||
|
notice.Context["environment"] = "production" |
||||
|
return notice |
||||
|
}) |
||||
|
notify("hello", nil) |
||||
|
|
||||
|
Expect(sentNotice.Context["environment"]).To(Equal("production")) |
||||
|
}) |
||||
|
|
||||
|
It("reports request", func() { |
||||
|
u, err := url.Parse("http://foo/bar") |
||||
|
Expect(err).To(BeNil()) |
||||
|
|
||||
|
req := &http.Request{ |
||||
|
URL: u, |
||||
|
Header: http.Header{ |
||||
|
"h1": {"h1v1", "h1v2"}, |
||||
|
"h2": {"h2v1"}, |
||||
|
"User-Agent": {"my_user_agent"}, |
||||
|
}, |
||||
|
Form: url.Values{ |
||||
|
"f1": {"f1v1"}, |
||||
|
"f2": {"f2v1", "f2v2"}, |
||||
|
}, |
||||
|
} |
||||
|
|
||||
|
notify("hello", req) |
||||
|
|
||||
|
ctx := sentNotice.Context |
||||
|
Expect(ctx["url"]).To(Equal("http://foo/bar")) |
||||
|
Expect(ctx["userAgent"]).To(Equal("my_user_agent")) |
||||
|
|
||||
|
params := sentNotice.Params |
||||
|
Expect(params["f1"]).To(Equal("f1v1")) |
||||
|
Expect(params["f2"]).To(Equal([]interface{}{"f2v1", "f2v2"})) |
||||
|
|
||||
|
env := sentNotice.Env |
||||
|
Expect(env["h1"]).To(Equal([]interface{}{"h1v1", "h1v2"})) |
||||
|
Expect(env["h2"]).To(Equal("h2v1")) |
||||
|
}) |
||||
|
|
||||
|
It("collects and reports context", func() { |
||||
|
notify("hello", nil) |
||||
|
|
||||
|
hostname, _ := os.Hostname() |
||||
|
wd, _ := os.Getwd() |
||||
|
Expect(sentNotice.Context["language"]).To(Equal(runtime.Version())) |
||||
|
Expect(sentNotice.Context["os"]).To(Equal(runtime.GOOS)) |
||||
|
Expect(sentNotice.Context["architecture"]).To(Equal(runtime.GOARCH)) |
||||
|
Expect(sentNotice.Context["hostname"]).To(Equal(hostname)) |
||||
|
Expect(sentNotice.Context["rootDirectory"]).To(Equal(wd)) |
||||
|
}) |
||||
|
}) |
@@ -0,0 +1,59 @@
|||||
|
package gobrake |
||||
|
|
||||
|
import ( |
||||
|
"runtime" |
||||
|
"strings" |
||||
|
) |
||||
|
|
||||
|
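// stackFilter reports frames at which the collected backtrace should be reset;
// it matches runtime.panic, so the reported stack begins at the function that panicked.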
func stackFilter(packageName, funcName string, file string, line int) bool { |
||||
|
return packageName == "runtime" && funcName == "panic" |
||||
|
} |
||||
|
|
||||
|
type StackFrame struct { |
||||
|
File string `json:"file"` |
||||
|
Line int `json:"line"` |
||||
|
Func string `json:"function"` |
||||
|
} |
||||
|
|
||||
|
func stack(depth int) []StackFrame { |
||||
|
stack := []StackFrame{} |
||||
|
for i := depth; ; i++ { |
||||
|
pc, file, line, ok := runtime.Caller(i) |
||||
|
if !ok { |
||||
|
break |
||||
|
} |
||||
|
packageName, funcName := packageFuncName(pc) |
||||
|
if stackFilter(packageName, funcName, file, line) { |
||||
|
stack = stack[:0] |
||||
|
continue |
||||
|
} |
||||
|
stack = append(stack, StackFrame{ |
||||
|
File: file, |
||||
|
Line: line, |
||||
|
Func: funcName, |
||||
|
}) |
||||
|
} |
||||
|
|
||||
|
return stack |
||||
|
} |
||||
|
|
||||
|
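// packageFuncName splits the fully qualified name of the function at pc into its package part and function part.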
func packageFuncName(pc uintptr) (string, string) { |
||||
|
f := runtime.FuncForPC(pc) |
||||
|
if f == nil { |
||||
|
return "", "" |
||||
|
} |
||||
|
|
||||
|
packageName := "" |
||||
|
funcName := f.Name() |
||||
|
|
||||
|
if ind := strings.LastIndex(funcName, "/"); ind > 0 { |
||||
|
packageName += funcName[:ind+1] |
||||
|
funcName = funcName[ind+1:] |
||||
|
} |
||||
|
if ind := strings.Index(funcName, "."); ind > 0 { |
||||
|
packageName += funcName[:ind] |
||||
|
funcName = funcName[ind+1:] |
||||
|
} |
||||
|
|
||||
|
return packageName, funcName |
||||
|
} |
@@ -0,0 +1,21 @@
|||||
|
The MIT License (MIT) |
||||
|
|
||||
|
Copyright (c) 2015 Gemnasium |
||||
|
|
||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy |
||||
|
of this software and associated documentation files (the "Software"), to deal |
||||
|
in the Software without restriction, including without limitation the rights |
||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell |
||||
|
copies of the Software, and to permit persons to whom the Software is |
||||
|
furnished to do so, subject to the following conditions: |
||||
|
|
||||
|
The above copyright notice and this permission notice shall be included in |
||||
|
all copies or substantial portions of the Software. |
||||
|
|
||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, |
||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN |
||||
|
THE SOFTWARE. |
@@ -0,0 +1,33 @@
|||||
|
# Airbrake Hook for Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:" />

Use this hook to send your errors to [Airbrake](https://airbrake.io/).
This hook uses the [official airbrake go package](https://github.com/airbrake/gobrake) and talks to the Airbrake v3 API.
The hook is asynchronous for `log.Error`, but blocks until the notice has been sent for `log.Fatal` and `log.Panic`.

All logrus fields will be sent as context fields on Airbrake.

## Usage

The hook must be configured with:

* A project ID (found in your Airbrake project settings)
* An API key (found in your Airbrake project settings)
* The name of the current environment ("development", "staging", "production", ...)

```go
import (
    "github.com/Sirupsen/logrus"
    "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "airbrake"
)

func main() {
    log := logrus.New()
    log.AddHook(airbrake.NewHook(123, "xyz", "development"))
    log.Error("some logging message") // The error is sent to Airbrake in the background
}
```

Note that if environment == "development", the hook will not send anything to Airbrake.
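Since every logrus field is forwarded to Airbrake as a context field, structured fields are the natural way to attach extra data to a notice. A small sketch building on the `log` instance from the usage example above; the field names and values are purely illustrative:

```go
log.WithFields(logrus.Fields{
    "user_id":    42,        // shows up as an Airbrake context field
    "request_id": "abc-123",
}).Error("failed to charge the customer")
```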