mirror of https://github.com/matrix-org/go-neb.git
Kegan Dougal
8 years ago
165 changed files with 42226 additions and 0 deletions
-
52vendor/manifest
-
666vendor/src/github.com/bradfitz/gomemcache/memcache/memcache.go
-
230vendor/src/github.com/bradfitz/gomemcache/memcache/memcache_test.go
-
114vendor/src/github.com/bradfitz/gomemcache/memcache/selector.go
-
39vendor/src/github.com/bradfitz/gomemcache/memcache/selector_test.go
-
54vendor/src/github.com/garyburd/redigo/internal/commandinfo.go
-
27vendor/src/github.com/garyburd/redigo/internal/commandinfo_test.go
-
68vendor/src/github.com/garyburd/redigo/internal/redistest/testdb.go
-
570vendor/src/github.com/garyburd/redigo/redis/conn.go
-
670vendor/src/github.com/garyburd/redigo/redis/conn_test.go
-
168vendor/src/github.com/garyburd/redigo/redis/doc.go
-
117vendor/src/github.com/garyburd/redigo/redis/log.go
-
397vendor/src/github.com/garyburd/redigo/redis/pool.go
-
684vendor/src/github.com/garyburd/redigo/redis/pool_test.go
-
144vendor/src/github.com/garyburd/redigo/redis/pubsub.go
-
148vendor/src/github.com/garyburd/redigo/redis/pubsub_test.go
-
44vendor/src/github.com/garyburd/redigo/redis/redis.go
-
393vendor/src/github.com/garyburd/redigo/redis/reply.go
-
179vendor/src/github.com/garyburd/redigo/redis/reply_test.go
-
555vendor/src/github.com/garyburd/redigo/redis/scan.go
-
440vendor/src/github.com/garyburd/redigo/redis/scan_test.go
-
86vendor/src/github.com/garyburd/redigo/redis/script.go
-
100vendor/src/github.com/garyburd/redigo/redis/script_test.go
-
177vendor/src/github.com/garyburd/redigo/redis/test_test.go
-
113vendor/src/github.com/garyburd/redigo/redis/zpop_example_test.go
-
15vendor/src/github.com/golang/snappy/AUTHORS
-
37vendor/src/github.com/golang/snappy/CONTRIBUTORS
-
27vendor/src/github.com/golang/snappy/LICENSE
-
107vendor/src/github.com/golang/snappy/README
-
77vendor/src/github.com/golang/snappy/cmd/snappytool/main.cpp
-
237vendor/src/github.com/golang/snappy/decode.go
-
14vendor/src/github.com/golang/snappy/decode_amd64.go
-
490vendor/src/github.com/golang/snappy/decode_amd64.s
-
101vendor/src/github.com/golang/snappy/decode_other.go
-
285vendor/src/github.com/golang/snappy/encode.go
-
29vendor/src/github.com/golang/snappy/encode_amd64.go
-
730vendor/src/github.com/golang/snappy/encode_amd64.s
-
238vendor/src/github.com/golang/snappy/encode_other.go
-
1965vendor/src/github.com/golang/snappy/golden_test.go
-
87vendor/src/github.com/golang/snappy/snappy.go
-
1353vendor/src/github.com/golang/snappy/snappy_test.go
-
396vendor/src/github.com/golang/snappy/testdata/Mark.Twain-Tom.Sawyer.txt
-
BINvendor/src/github.com/golang/snappy/testdata/Mark.Twain-Tom.Sawyer.txt.rawsnappy
-
202vendor/src/github.com/google/btree/LICENSE
-
12vendor/src/github.com/google/btree/README.md
-
738vendor/src/github.com/google/btree/btree.go
-
76vendor/src/github.com/google/btree/btree_mem.go
-
563vendor/src/github.com/google/btree/btree_test.go
-
7vendor/src/github.com/gregjones/httpcache/LICENSE.txt
-
25vendor/src/github.com/gregjones/httpcache/README.md
-
61vendor/src/github.com/gregjones/httpcache/diskcache/diskcache.go
-
42vendor/src/github.com/gregjones/httpcache/diskcache/diskcache_test.go
-
594vendor/src/github.com/gregjones/httpcache/httpcache.go
-
1208vendor/src/github.com/gregjones/httpcache/httpcache_test.go
-
51vendor/src/github.com/gregjones/httpcache/leveldbcache/leveldbcache.go
-
46vendor/src/github.com/gregjones/httpcache/leveldbcache/leveldbcache_test.go
-
61vendor/src/github.com/gregjones/httpcache/memcache/appengine.go
-
44vendor/src/github.com/gregjones/httpcache/memcache/appengine_test.go
-
60vendor/src/github.com/gregjones/httpcache/memcache/memcache.go
-
47vendor/src/github.com/gregjones/httpcache/memcache/memcache_test.go
-
43vendor/src/github.com/gregjones/httpcache/redis/redis.go
-
43vendor/src/github.com/gregjones/httpcache/redis/redis_test.go
-
19vendor/src/github.com/peterbourgon/diskv/LICENSE
-
141vendor/src/github.com/peterbourgon/diskv/README.md
-
253vendor/src/github.com/peterbourgon/diskv/basic_test.go
-
64vendor/src/github.com/peterbourgon/diskv/compression.go
-
72vendor/src/github.com/peterbourgon/diskv/compression_test.go
-
578vendor/src/github.com/peterbourgon/diskv/diskv.go
-
63vendor/src/github.com/peterbourgon/diskv/examples/content-addressable-store/cas.go
-
30vendor/src/github.com/peterbourgon/diskv/examples/super-simple-store/super-simple-store.go
-
76vendor/src/github.com/peterbourgon/diskv/import_test.go
-
115vendor/src/github.com/peterbourgon/diskv/index.go
-
148vendor/src/github.com/peterbourgon/diskv/index_test.go
-
121vendor/src/github.com/peterbourgon/diskv/issues_test.go
-
231vendor/src/github.com/peterbourgon/diskv/keys_test.go
-
153vendor/src/github.com/peterbourgon/diskv/speed_test.go
-
117vendor/src/github.com/peterbourgon/diskv/stream_test.go
-
349vendor/src/github.com/syndtr/goleveldb/leveldb/batch.go
-
147vendor/src/github.com/syndtr/goleveldb/leveldb/batch_test.go
-
507vendor/src/github.com/syndtr/goleveldb/leveldb/bench_test.go
-
29vendor/src/github.com/syndtr/goleveldb/leveldb/cache/bench_test.go
-
705vendor/src/github.com/syndtr/goleveldb/leveldb/cache/cache.go
-
563vendor/src/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go
-
195vendor/src/github.com/syndtr/goleveldb/leveldb/cache/lru.go
-
67vendor/src/github.com/syndtr/goleveldb/leveldb/comparer.go
-
51vendor/src/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go
-
57vendor/src/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go
-
496vendor/src/github.com/syndtr/goleveldb/leveldb/corrupt_test.go
-
1091vendor/src/github.com/syndtr/goleveldb/leveldb/db.go
-
826vendor/src/github.com/syndtr/goleveldb/leveldb/db_compaction.go
-
360vendor/src/github.com/syndtr/goleveldb/leveldb/db_iter.go
-
183vendor/src/github.com/syndtr/goleveldb/leveldb/db_snapshot.go
-
234vendor/src/github.com/syndtr/goleveldb/leveldb/db_state.go
-
2925vendor/src/github.com/syndtr/goleveldb/leveldb/db_test.go
-
325vendor/src/github.com/syndtr/goleveldb/leveldb/db_transaction.go
-
102vendor/src/github.com/syndtr/goleveldb/leveldb/db_util.go
-
443vendor/src/github.com/syndtr/goleveldb/leveldb/db_write.go
-
90vendor/src/github.com/syndtr/goleveldb/leveldb/doc.go
-
20vendor/src/github.com/syndtr/goleveldb/leveldb/errors.go
-
78vendor/src/github.com/syndtr/goleveldb/leveldb/errors/errors.go
@ -0,0 +1,666 @@ |
|||||
|
/* |
||||
|
Copyright 2011 Google Inc. |
||||
|
|
||||
|
Licensed under the Apache License, Version 2.0 (the "License"); |
||||
|
you may not use this file except in compliance with the License. |
||||
|
You may obtain a copy of the License at |
||||
|
|
||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
|
||||
|
Unless required by applicable law or agreed to in writing, software |
||||
|
distributed under the License is distributed on an "AS IS" BASIS, |
||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
|
See the License for the specific language governing permissions and |
||||
|
limitations under the License. |
||||
|
*/ |
||||
|
|
||||
|
// Package memcache provides a client for the memcached cache server.
|
||||
|
package memcache |
||||
|
|
||||
|
import ( |
||||
|
"bufio" |
||||
|
"bytes" |
||||
|
"errors" |
||||
|
"fmt" |
||||
|
"io" |
||||
|
"io/ioutil" |
||||
|
"net" |
||||
|
|
||||
|
"strconv" |
||||
|
"strings" |
||||
|
"sync" |
||||
|
"time" |
||||
|
) |
||||
|
|
||||
|
// Similar to:
|
||||
|
// http://code.google.com/appengine/docs/go/memcache/reference.html
|
||||
|
|
||||
|
var ( |
||||
|
// ErrCacheMiss means that a Get failed because the item wasn't present.
|
||||
|
ErrCacheMiss = errors.New("memcache: cache miss") |
||||
|
|
||||
|
// ErrCASConflict means that a CompareAndSwap call failed due to the
|
||||
|
// cached value being modified between the Get and the CompareAndSwap.
|
||||
|
// If the cached value was simply evicted rather than replaced,
|
||||
|
// ErrNotStored will be returned instead.
|
||||
|
ErrCASConflict = errors.New("memcache: compare-and-swap conflict") |
||||
|
|
||||
|
// ErrNotStored means that a conditional write operation (i.e. Add or
|
||||
|
// CompareAndSwap) failed because the condition was not satisfied.
|
||||
|
ErrNotStored = errors.New("memcache: item not stored") |
||||
|
|
||||
|
// ErrServer means that a server error occurred.
|
||||
|
ErrServerError = errors.New("memcache: server error") |
||||
|
|
||||
|
// ErrNoStats means that no statistics were available.
|
||||
|
ErrNoStats = errors.New("memcache: no statistics available") |
||||
|
|
||||
|
// ErrMalformedKey is returned when an invalid key is used.
|
||||
|
// Keys must be at maximum 250 bytes long, ASCII, and not
|
||||
|
// contain whitespace or control characters.
|
||||
|
ErrMalformedKey = errors.New("malformed: key is too long or contains invalid characters") |
||||
|
|
||||
|
// ErrNoServers is returned when no servers are configured or available.
|
||||
|
ErrNoServers = errors.New("memcache: no servers configured or available") |
||||
|
) |
||||
|
|
||||
|
// DefaultTimeout is the default socket read/write timeout.
|
||||
|
const DefaultTimeout = 100 * time.Millisecond |
||||
|
|
||||
|
const ( |
||||
|
buffered = 8 // arbitrary buffered channel size, for readability
|
||||
|
maxIdleConnsPerAddr = 2 // TODO(bradfitz): make this configurable?
|
||||
|
) |
||||
|
|
||||
|
// resumableError returns true if err is only a protocol-level cache error.
|
||||
|
// This is used to determine whether or not a server connection should
|
||||
|
// be re-used or not. If an error occurs, by default we don't reuse the
|
||||
|
// connection, unless it was just a cache error.
|
||||
|
func resumableError(err error) bool { |
||||
|
switch err { |
||||
|
case ErrCacheMiss, ErrCASConflict, ErrNotStored, ErrMalformedKey: |
||||
|
return true |
||||
|
} |
||||
|
return false |
||||
|
} |
||||
|
|
||||
|
// legalKey reports whether key is acceptable to the memcache text
// protocol: at most 250 bytes, printable ASCII, and free of spaces
// and control characters.
func legalKey(key string) bool {
	if len(key) > 250 {
		return false
	}
	for _, c := range []byte(key) {
		if c <= ' ' || c > 0x7e {
			return false
		}
	}
	return true
}
||||
|
|
||||
|
// Byte constants for the memcached ASCII protocol: command terminator,
// field separator, and the full response lines the client matches on.
var (
	crlf            = []byte("\r\n")
	space           = []byte(" ")
	resultOK        = []byte("OK\r\n")
	resultStored    = []byte("STORED\r\n")
	resultNotStored = []byte("NOT_STORED\r\n")
	resultExists    = []byte("EXISTS\r\n")
	resultNotFound  = []byte("NOT_FOUND\r\n")
	resultDeleted   = []byte("DELETED\r\n")
	resultEnd       = []byte("END\r\n")
	// resultOk duplicates resultOK; it is kept so existing references
	// continue to compile. Prefer resultOK in new code.
	resultOk      = []byte("OK\r\n")
	resultTouched = []byte("TOUCHED\r\n")

	resultClientErrorPrefix = []byte("CLIENT_ERROR ")
)
||||
|
|
||||
|
// New returns a memcache client using the provided server(s)
|
||||
|
// with equal weight. If a server is listed multiple times,
|
||||
|
// it gets a proportional amount of weight.
|
||||
|
func New(server ...string) *Client { |
||||
|
ss := new(ServerList) |
||||
|
ss.SetServers(server...) |
||||
|
return NewFromSelector(ss) |
||||
|
} |
||||
|
|
||||
|
// NewFromSelector returns a new Client using the provided ServerSelector.
|
||||
|
func NewFromSelector(ss ServerSelector) *Client { |
||||
|
return &Client{selector: ss} |
||||
|
} |
||||
|
|
||||
|
// Client is a memcache client.
// It is safe for unlocked use by multiple concurrent goroutines.
type Client struct {
	// Timeout specifies the socket read/write timeout.
	// If zero, DefaultTimeout is used.
	Timeout time.Duration

	// selector picks which server owns a given key.
	selector ServerSelector

	// lk guards freeconn.
	lk sync.Mutex
	// freeconn is the idle-connection pool, keyed by server address
	// string; each list holds at most maxIdleConnsPerAddr entries.
	freeconn map[string][]*conn
}

// Item is an item to be got or stored in a memcached server.
type Item struct {
	// Key is the Item's key (250 bytes maximum).
	Key string

	// Value is the Item's value.
	Value []byte

	// Flags are server-opaque flags whose semantics are entirely
	// up to the app.
	Flags uint32

	// Expiration is the cache expiration time, in seconds: either a relative
	// time from now (up to 1 month), or an absolute Unix epoch time.
	// Zero means the Item has no expiration time.
	Expiration int32

	// casid is the compare-and-swap ID reported by the server on a
	// "gets" response; consumed by CompareAndSwap.
	casid uint64
}
||||
|
|
||||
|
// conn is a connection to a server.
type conn struct {
	nc   net.Conn          // underlying network connection
	rw   *bufio.ReadWriter // buffered reader/writer over nc
	addr net.Addr          // server address; pool key when the conn is released
	c    *Client           // owning client (provides the pool and timeouts)
}
||||
|
|
||||
|
// release returns this connection back to the client's free pool.
func (cn *conn) release() {
	cn.c.putFreeConn(cn.addr, cn)
}

// extendDeadline pushes the connection's combined read/write deadline
// forward to now plus the client's configured timeout.
func (cn *conn) extendDeadline() {
	cn.nc.SetDeadline(time.Now().Add(cn.c.netTimeout()))
}
||||
|
|
||||
|
// condRelease releases this connection if the error pointed to by err
|
||||
|
// is nil (not an error) or is only a protocol level error (e.g. a
|
||||
|
// cache miss). The purpose is to not recycle TCP connections that
|
||||
|
// are bad.
|
||||
|
func (cn *conn) condRelease(err *error) { |
||||
|
if *err == nil || resumableError(*err) { |
||||
|
cn.release() |
||||
|
} else { |
||||
|
cn.nc.Close() |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func (c *Client) putFreeConn(addr net.Addr, cn *conn) { |
||||
|
c.lk.Lock() |
||||
|
defer c.lk.Unlock() |
||||
|
if c.freeconn == nil { |
||||
|
c.freeconn = make(map[string][]*conn) |
||||
|
} |
||||
|
freelist := c.freeconn[addr.String()] |
||||
|
if len(freelist) >= maxIdleConnsPerAddr { |
||||
|
cn.nc.Close() |
||||
|
return |
||||
|
} |
||||
|
c.freeconn[addr.String()] = append(freelist, cn) |
||||
|
} |
||||
|
|
||||
|
func (c *Client) getFreeConn(addr net.Addr) (cn *conn, ok bool) { |
||||
|
c.lk.Lock() |
||||
|
defer c.lk.Unlock() |
||||
|
if c.freeconn == nil { |
||||
|
return nil, false |
||||
|
} |
||||
|
freelist, ok := c.freeconn[addr.String()] |
||||
|
if !ok || len(freelist) == 0 { |
||||
|
return nil, false |
||||
|
} |
||||
|
cn = freelist[len(freelist)-1] |
||||
|
c.freeconn[addr.String()] = freelist[:len(freelist)-1] |
||||
|
return cn, true |
||||
|
} |
||||
|
|
||||
|
func (c *Client) netTimeout() time.Duration { |
||||
|
if c.Timeout != 0 { |
||||
|
return c.Timeout |
||||
|
} |
||||
|
return DefaultTimeout |
||||
|
} |
||||
|
|
||||
|
// ConnectTimeoutError is the error type used when it takes
// too long to connect to the desired host. This level of
// detail can generally be ignored.
type ConnectTimeoutError struct {
	Addr net.Addr // address of the server that timed out
}

// Error implements the error interface.
func (cte *ConnectTimeoutError) Error() string {
	return "memcache: connect timeout to " + cte.Addr.String()
}
||||
|
|
||||
|
func (c *Client) dial(addr net.Addr) (net.Conn, error) { |
||||
|
type connError struct { |
||||
|
cn net.Conn |
||||
|
err error |
||||
|
} |
||||
|
|
||||
|
nc, err := net.DialTimeout(addr.Network(), addr.String(), c.netTimeout()) |
||||
|
if err == nil { |
||||
|
return nc, nil |
||||
|
} |
||||
|
|
||||
|
if ne, ok := err.(net.Error); ok && ne.Timeout() { |
||||
|
return nil, &ConnectTimeoutError{addr} |
||||
|
} |
||||
|
|
||||
|
return nil, err |
||||
|
} |
||||
|
|
||||
|
func (c *Client) getConn(addr net.Addr) (*conn, error) { |
||||
|
cn, ok := c.getFreeConn(addr) |
||||
|
if ok { |
||||
|
cn.extendDeadline() |
||||
|
return cn, nil |
||||
|
} |
||||
|
nc, err := c.dial(addr) |
||||
|
if err != nil { |
||||
|
return nil, err |
||||
|
} |
||||
|
cn = &conn{ |
||||
|
nc: nc, |
||||
|
addr: addr, |
||||
|
rw: bufio.NewReadWriter(bufio.NewReader(nc), bufio.NewWriter(nc)), |
||||
|
c: c, |
||||
|
} |
||||
|
cn.extendDeadline() |
||||
|
return cn, nil |
||||
|
} |
||||
|
|
||||
|
func (c *Client) onItem(item *Item, fn func(*Client, *bufio.ReadWriter, *Item) error) error { |
||||
|
addr, err := c.selector.PickServer(item.Key) |
||||
|
if err != nil { |
||||
|
return err |
||||
|
} |
||||
|
cn, err := c.getConn(addr) |
||||
|
if err != nil { |
||||
|
return err |
||||
|
} |
||||
|
defer cn.condRelease(&err) |
||||
|
if err = fn(c, cn.rw, item); err != nil { |
||||
|
return err |
||||
|
} |
||||
|
return nil |
||||
|
} |
||||
|
|
||||
|
// FlushAll invokes flushAllFromAddr for every server the selector's
// Each method yields.
func (c *Client) FlushAll() error {
	return c.selector.Each(c.flushAllFromAddr)
}

// Get gets the item for the given key. ErrCacheMiss is returned for a
// memcache cache miss. The key must be at most 250 bytes in length.
func (c *Client) Get(key string) (item *Item, err error) {
	// item is assigned by the callback; if the round trip succeeded but
	// no value arrived, that is translated into ErrCacheMiss below.
	err = c.withKeyAddr(key, func(addr net.Addr) error {
		return c.getFromAddr(addr, []string{key}, func(it *Item) { item = it })
	})
	if err == nil && item == nil {
		err = ErrCacheMiss
	}
	return
}

// Touch updates the expiry for the given key. The seconds parameter is either
// a Unix timestamp or, if seconds is less than 1 month, the number of seconds
// into the future at which time the item will expire. ErrCacheMiss is returned if the
// key is not in the cache. The key must be at most 250 bytes in length.
func (c *Client) Touch(key string, seconds int32) (err error) {
	return c.withKeyAddr(key, func(addr net.Addr) error {
		return c.touchFromAddr(addr, []string{key}, seconds)
	})
}
||||
|
|
||||
|
func (c *Client) withKeyAddr(key string, fn func(net.Addr) error) (err error) { |
||||
|
if !legalKey(key) { |
||||
|
return ErrMalformedKey |
||||
|
} |
||||
|
addr, err := c.selector.PickServer(key) |
||||
|
if err != nil { |
||||
|
return err |
||||
|
} |
||||
|
return fn(addr) |
||||
|
} |
||||
|
|
||||
|
// withAddrRw obtains a (possibly pooled) connection to addr and invokes
// fn with its buffered ReadWriter. The named result err is deliberate:
// "return fn(cn.rw)" assigns into err before the deferred condRelease
// runs, so the connection is recycled only when fn returned nil or a
// protocol-level error.
func (c *Client) withAddrRw(addr net.Addr, fn func(*bufio.ReadWriter) error) (err error) {
	cn, err := c.getConn(addr)
	if err != nil {
		return err
	}
	defer cn.condRelease(&err)
	return fn(cn.rw)
}

// withKeyRw validates key, picks its owning server, and runs fn against
// a connection to that server.
func (c *Client) withKeyRw(key string, fn func(*bufio.ReadWriter) error) error {
	return c.withKeyAddr(key, func(addr net.Addr) error {
		return c.withAddrRw(addr, fn)
	})
}
||||
|
|
||||
|
func (c *Client) getFromAddr(addr net.Addr, keys []string, cb func(*Item)) error { |
||||
|
return c.withAddrRw(addr, func(rw *bufio.ReadWriter) error { |
||||
|
if _, err := fmt.Fprintf(rw, "gets %s\r\n", strings.Join(keys, " ")); err != nil { |
||||
|
return err |
||||
|
} |
||||
|
if err := rw.Flush(); err != nil { |
||||
|
return err |
||||
|
} |
||||
|
if err := parseGetResponse(rw.Reader, cb); err != nil { |
||||
|
return err |
||||
|
} |
||||
|
return nil |
||||
|
}) |
||||
|
} |
||||
|
|
||||
|
// flushAllFromAddr send the flush_all command to the given addr
|
||||
|
func (c *Client) flushAllFromAddr(addr net.Addr) error { |
||||
|
return c.withAddrRw(addr, func(rw *bufio.ReadWriter) error { |
||||
|
if _, err := fmt.Fprintf(rw, "flush_all\r\n"); err != nil { |
||||
|
return err |
||||
|
} |
||||
|
if err := rw.Flush(); err != nil { |
||||
|
return err |
||||
|
} |
||||
|
line, err := rw.ReadSlice('\n') |
||||
|
if err != nil { |
||||
|
return err |
||||
|
} |
||||
|
switch { |
||||
|
case bytes.Equal(line, resultOk): |
||||
|
break |
||||
|
default: |
||||
|
return fmt.Errorf("memcache: unexpected response line from flush_all: %q", string(line)) |
||||
|
} |
||||
|
return nil |
||||
|
}) |
||||
|
} |
||||
|
|
||||
|
// touchFromAddr sends a touch command for each key to the server at
// addr, resetting each key's expiration. It returns ErrCacheMiss if the
// server replies NOT_FOUND, and stops at the first error.
func (c *Client) touchFromAddr(addr net.Addr, keys []string, expiration int32) error {
	return c.withAddrRw(addr, func(rw *bufio.ReadWriter) error {
		for _, key := range keys {
			if _, err := fmt.Fprintf(rw, "touch %s %d\r\n", key, expiration); err != nil {
				return err
			}
			// Each touch is flushed and its one-line reply consumed
			// before the next key is sent.
			if err := rw.Flush(); err != nil {
				return err
			}
			line, err := rw.ReadSlice('\n')
			if err != nil {
				return err
			}
			switch {
			case bytes.Equal(line, resultTouched):
				break
			case bytes.Equal(line, resultNotFound):
				return ErrCacheMiss
			default:
				return fmt.Errorf("memcache: unexpected response line from touch: %q", string(line))
			}
		}
		return nil
	})
}
||||
|
|
||||
|
// GetMulti is a batch version of Get. The returned map from keys to
|
||||
|
// items may have fewer elements than the input slice, due to memcache
|
||||
|
// cache misses. Each key must be at most 250 bytes in length.
|
||||
|
// If no error is returned, the returned map will also be non-nil.
|
||||
|
func (c *Client) GetMulti(keys []string) (map[string]*Item, error) { |
||||
|
var lk sync.Mutex |
||||
|
m := make(map[string]*Item) |
||||
|
addItemToMap := func(it *Item) { |
||||
|
lk.Lock() |
||||
|
defer lk.Unlock() |
||||
|
m[it.Key] = it |
||||
|
} |
||||
|
|
||||
|
keyMap := make(map[net.Addr][]string) |
||||
|
for _, key := range keys { |
||||
|
if !legalKey(key) { |
||||
|
return nil, ErrMalformedKey |
||||
|
} |
||||
|
addr, err := c.selector.PickServer(key) |
||||
|
if err != nil { |
||||
|
return nil, err |
||||
|
} |
||||
|
keyMap[addr] = append(keyMap[addr], key) |
||||
|
} |
||||
|
|
||||
|
ch := make(chan error, buffered) |
||||
|
for addr, keys := range keyMap { |
||||
|
go func(addr net.Addr, keys []string) { |
||||
|
ch <- c.getFromAddr(addr, keys, addItemToMap) |
||||
|
}(addr, keys) |
||||
|
} |
||||
|
|
||||
|
var err error |
||||
|
for _ = range keyMap { |
||||
|
if ge := <-ch; ge != nil { |
||||
|
err = ge |
||||
|
} |
||||
|
} |
||||
|
return m, err |
||||
|
} |
||||
|
|
||||
|
// parseGetResponse reads a GET response from r and calls cb for each
// read and allocated Item. The response is a sequence of
// "VALUE <key> <flags> <bytes> [<casid>]\r\n<data>\r\n" records,
// terminated by "END\r\n".
func parseGetResponse(r *bufio.Reader, cb func(*Item)) error {
	for {
		line, err := r.ReadSlice('\n')
		if err != nil {
			return err
		}
		if bytes.Equal(line, resultEnd) {
			return nil
		}
		it := new(Item)
		size, err := scanGetResponseLine(line, it)
		if err != nil {
			return err
		}
		// Read the value plus its trailing \r\n (hence size+2) in one
		// bounded read.
		it.Value, err = ioutil.ReadAll(io.LimitReader(r, int64(size)+2))
		if err != nil {
			return err
		}
		if !bytes.HasSuffix(it.Value, crlf) {
			return fmt.Errorf("memcache: corrupt get result read")
		}
		// Strip the trailing \r\n before handing the value to cb.
		it.Value = it.Value[:size]
		cb(it)
	}
}
||||
|
|
||||
|
// scanGetResponseLine populates it and returns the declared size of the item.
// It does not read the bytes of the item.
func scanGetResponseLine(line []byte, it *Item) (size int, err error) {
	// A "gets" VALUE line carries four fields (the last is the CAS id);
	// a plain "get" line carries three — distinguished by counting
	// spaces.
	pattern := "VALUE %s %d %d %d\r\n"
	dest := []interface{}{&it.Key, &it.Flags, &size, &it.casid}
	if bytes.Count(line, space) == 3 {
		pattern = "VALUE %s %d %d\r\n"
		dest = dest[:3]
	}
	n, err := fmt.Sscanf(string(line), pattern, dest...)
	if err != nil || n != len(dest) {
		return -1, fmt.Errorf("memcache: unexpected line in get response: %q", line)
	}
	return size, nil
}
||||
|
|
||||
|
// Set writes the given item, unconditionally.
func (c *Client) Set(item *Item) error {
	return c.onItem(item, (*Client).set)
}

// set implements Set on an established connection.
func (c *Client) set(rw *bufio.ReadWriter, item *Item) error {
	return c.populateOne(rw, "set", item)
}

// Add writes the given item, if no value already exists for its
// key. ErrNotStored is returned if that condition is not met.
func (c *Client) Add(item *Item) error {
	return c.onItem(item, (*Client).add)
}

// add implements Add on an established connection.
func (c *Client) add(rw *bufio.ReadWriter, item *Item) error {
	return c.populateOne(rw, "add", item)
}

// Replace writes the given item, but only if the server *does*
// already hold data for this key.
func (c *Client) Replace(item *Item) error {
	return c.onItem(item, (*Client).replace)
}

// replace implements Replace on an established connection.
func (c *Client) replace(rw *bufio.ReadWriter, item *Item) error {
	return c.populateOne(rw, "replace", item)
}

// CompareAndSwap writes the given item that was previously returned
// by Get, if the value was neither modified or evicted between the
// Get and the CompareAndSwap calls. The item's Key should not change
// between calls but all other item fields may differ. ErrCASConflict
// is returned if the value was modified in between the
// calls. ErrNotStored is returned if the value was evicted in between
// the calls.
func (c *Client) CompareAndSwap(item *Item) error {
	return c.onItem(item, (*Client).cas)
}

// cas implements CompareAndSwap on an established connection, using
// the casid captured by a previous Get.
func (c *Client) cas(rw *bufio.ReadWriter, item *Item) error {
	return c.populateOne(rw, "cas", item)
}
||||
|
|
||||
|
func (c *Client) populateOne(rw *bufio.ReadWriter, verb string, item *Item) error { |
||||
|
if !legalKey(item.Key) { |
||||
|
return ErrMalformedKey |
||||
|
} |
||||
|
var err error |
||||
|
if verb == "cas" { |
||||
|
_, err = fmt.Fprintf(rw, "%s %s %d %d %d %d\r\n", |
||||
|
verb, item.Key, item.Flags, item.Expiration, len(item.Value), item.casid) |
||||
|
} else { |
||||
|
_, err = fmt.Fprintf(rw, "%s %s %d %d %d\r\n", |
||||
|
verb, item.Key, item.Flags, item.Expiration, len(item.Value)) |
||||
|
} |
||||
|
if err != nil { |
||||
|
return err |
||||
|
} |
||||
|
if _, err = rw.Write(item.Value); err != nil { |
||||
|
return err |
||||
|
} |
||||
|
if _, err := rw.Write(crlf); err != nil { |
||||
|
return err |
||||
|
} |
||||
|
if err := rw.Flush(); err != nil { |
||||
|
return err |
||||
|
} |
||||
|
line, err := rw.ReadSlice('\n') |
||||
|
if err != nil { |
||||
|
return err |
||||
|
} |
||||
|
switch { |
||||
|
case bytes.Equal(line, resultStored): |
||||
|
return nil |
||||
|
case bytes.Equal(line, resultNotStored): |
||||
|
return ErrNotStored |
||||
|
case bytes.Equal(line, resultExists): |
||||
|
return ErrCASConflict |
||||
|
case bytes.Equal(line, resultNotFound): |
||||
|
return ErrCacheMiss |
||||
|
} |
||||
|
return fmt.Errorf("memcache: unexpected response line from %q: %q", verb, string(line)) |
||||
|
} |
||||
|
|
||||
|
func writeReadLine(rw *bufio.ReadWriter, format string, args ...interface{}) ([]byte, error) { |
||||
|
_, err := fmt.Fprintf(rw, format, args...) |
||||
|
if err != nil { |
||||
|
return nil, err |
||||
|
} |
||||
|
if err := rw.Flush(); err != nil { |
||||
|
return nil, err |
||||
|
} |
||||
|
line, err := rw.ReadSlice('\n') |
||||
|
return line, err |
||||
|
} |
||||
|
|
||||
|
// writeExpectf writes a formatted command via writeReadLine and checks
// the reply: a line equal to expect or to "OK\r\n" is success, the
// well-known memcached error replies map to the package's sentinel
// errors, and anything else is reported verbatim.
func writeExpectf(rw *bufio.ReadWriter, expect []byte, format string, args ...interface{}) error {
	line, err := writeReadLine(rw, format, args...)
	if err != nil {
		return err
	}
	switch {
	case bytes.Equal(line, resultOK):
		return nil
	case bytes.Equal(line, expect):
		return nil
	case bytes.Equal(line, resultNotStored):
		return ErrNotStored
	case bytes.Equal(line, resultExists):
		return ErrCASConflict
	case bytes.Equal(line, resultNotFound):
		return ErrCacheMiss
	}
	return fmt.Errorf("memcache: unexpected response line: %q", string(line))
}
||||
|
|
||||
|
// Delete deletes the item with the provided key. The error ErrCacheMiss is
// returned if the item didn't already exist in the cache.
func (c *Client) Delete(key string) error {
	return c.withKeyRw(key, func(rw *bufio.ReadWriter) error {
		return writeExpectf(rw, resultDeleted, "delete %s\r\n", key)
	})
}

// DeleteAll deletes all items in the cache.
// NOTE(review): this sends flush_all over a single connection chosen by
// PickServer(""); with more than one configured server it appears to
// flush only that one server (unlike FlushAll, which visits every
// server) — confirm intended behavior.
func (c *Client) DeleteAll() error {
	return c.withKeyRw("", func(rw *bufio.ReadWriter) error {
		return writeExpectf(rw, resultDeleted, "flush_all\r\n")
	})
}
||||
|
|
||||
|
// Increment atomically increments key by delta. The return value is
// the new value after being incremented or an error. If the value
// didn't exist in memcached the error is ErrCacheMiss. The value in
// memcached must be an decimal number, or an error will be returned.
// On 64-bit overflow, the new value wraps around.
func (c *Client) Increment(key string, delta uint64) (newValue uint64, err error) {
	return c.incrDecr("incr", key, delta)
}

// Decrement atomically decrements key by delta. The return value is
// the new value after being decremented or an error. If the value
// didn't exist in memcached the error is ErrCacheMiss. The value in
// memcached must be an decimal number, or an error will be returned.
// On underflow, the new value is capped at zero and does not wrap
// around.
func (c *Client) Decrement(key string, delta uint64) (newValue uint64, err error) {
	return c.incrDecr("decr", key, delta)
}

// incrDecr implements both Increment and Decrement: it sends
// "<verb> <key> <delta>" and parses the reply, which is NOT_FOUND, a
// CLIENT_ERROR line, or the new value in decimal.
func (c *Client) incrDecr(verb, key string, delta uint64) (uint64, error) {
	var val uint64
	err := c.withKeyRw(key, func(rw *bufio.ReadWriter) error {
		line, err := writeReadLine(rw, "%s %s %d\r\n", verb, key, delta)
		if err != nil {
			return err
		}
		switch {
		case bytes.Equal(line, resultNotFound):
			return ErrCacheMiss
		case bytes.HasPrefix(line, resultClientErrorPrefix):
			// Strip the "CLIENT_ERROR " prefix and the trailing \r\n.
			errMsg := line[len(resultClientErrorPrefix) : len(line)-2]
			return errors.New("memcache: client error: " + string(errMsg))
		}
		// Otherwise the line is the new value followed by \r\n.
		val, err = strconv.ParseUint(string(line[:len(line)-2]), 10, 64)
		if err != nil {
			return err
		}
		return nil
	})
	return val, err
}
@ -0,0 +1,230 @@ |
|||||
|
/* |
||||
|
Copyright 2011 Google Inc. |
||||
|
|
||||
|
Licensed under the Apache License, Version 2.0 (the "License"); |
||||
|
you may not use this file except in compliance with the License. |
||||
|
You may obtain a copy of the License at |
||||
|
|
||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
|
||||
|
Unless required by applicable law or agreed to in writing, software |
||||
|
distributed under the License is distributed on an "AS IS" BASIS, |
||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
|
See the License for the specific language governing permissions and |
||||
|
limitations under the License. |
||||
|
*/ |
||||
|
|
||||
|
// Package memcache provides a client for the memcached cache server.
|
||||
|
package memcache |
||||
|
|
||||
|
import ( |
||||
|
"fmt" |
||||
|
"net" |
||||
|
"os" |
||||
|
"os/exec" |
||||
|
"strings" |
||||
|
"testing" |
||||
|
"time" |
||||
|
) |
||||
|
|
||||
|
// testServer is the address a locally running memcached is expected to
// listen on for the TCP-based tests.
const testServer = "localhost:11211"

// setup skips the calling test when no memcached is reachable at
// testServer; otherwise it flushes the server so each test starts from
// an empty cache. It only ever returns true: t.Skipf aborts the test
// goroutine when the dial fails.
func setup(t *testing.T) bool {
	c, err := net.Dial("tcp", testServer)
	if err != nil {
		t.Skipf("skipping test; no server running at %s", testServer)
	}
	// Best-effort flush; write/close errors are deliberately ignored.
	c.Write([]byte("flush_all\r\n"))
	c.Close()
	return true
}

// TestLocalhost runs the full client test suite against a memcached
// listening on testServer (skipped when none is running).
func TestLocalhost(t *testing.T) {
	if !setup(t) {
		return
	}
	testWithClient(t, New(testServer))
}

// Run the memcached binary as a child process and connect to its unix socket.
func TestUnixSocket(t *testing.T) {
	sock := fmt.Sprintf("/tmp/test-gomemcache-%d.sock", os.Getpid())
	cmd := exec.Command("memcached", "-s", sock)
	if err := cmd.Start(); err != nil {
		t.Skipf("skipping test; couldn't find memcached")
		return
	}
	// Deferred calls run LIFO: kill the child first, then reap it.
	defer cmd.Wait()
	defer cmd.Process.Kill()

	// Wait a bit for the socket to appear.
	for i := 0; i < 10; i++ {
		if _, err := os.Stat(sock); err == nil {
			break
		}
		time.Sleep(time.Duration(25*i) * time.Millisecond)
	}

	testWithClient(t, New(sock))
}

// mustSetF returns a helper that stores an item via c.Set and fails
// the test immediately on any error.
func mustSetF(t *testing.T, c *Client) func(*Item) {
	return func(it *Item) {
		if err := c.Set(it); err != nil {
			t.Fatalf("failed to Set %#v: %v", *it, err)
		}
	}
}
||||
|
|
||||
|
// testWithClient exercises the full client API against a live memcached
// server: Set/Get round-trip (including flags), Add/Replace semantics,
// GetMulti, Delete, Increment/Decrement (including error cases), Touch
// (via testTouchWithClient), and DeleteAll. Order matters: later steps
// depend on keys created by earlier ones.
func testWithClient(t *testing.T, c *Client) {
	checkErr := func(err error, format string, args ...interface{}) {
		if err != nil {
			t.Fatalf(format, args...)
		}
	}
	mustSet := mustSetF(t, c)

	// Set: storing the same item twice must succeed both times.
	foo := &Item{Key: "foo", Value: []byte("fooval"), Flags: 123}
	err := c.Set(foo)
	checkErr(err, "first set(foo): %v", err)
	err = c.Set(foo)
	checkErr(err, "second set(foo): %v", err)

	// Get: key, value and flags must round-trip unchanged.
	it, err := c.Get("foo")
	checkErr(err, "get(foo): %v", err)
	if it.Key != "foo" {
		t.Errorf("get(foo) Key = %q, want foo", it.Key)
	}
	if string(it.Value) != "fooval" {
		t.Errorf("get(foo) Value = %q, want fooval", string(it.Value))
	}
	if it.Flags != 123 {
		t.Errorf("get(foo) Flags = %v, want 123", it.Flags)
	}

	// Add: succeeds only when the key does not yet exist.
	bar := &Item{Key: "bar", Value: []byte("barval")}
	err = c.Add(bar)
	checkErr(err, "first add(foo): %v", err)
	if err := c.Add(bar); err != ErrNotStored {
		t.Fatalf("second add(foo) want ErrNotStored, got %v", err)
	}

	// Replace: fails for a missing key, succeeds for an existing one.
	baz := &Item{Key: "baz", Value: []byte("bazvalue")}
	if err := c.Replace(baz); err != ErrNotStored {
		t.Fatalf("expected replace(baz) to return ErrNotStored, got %v", err)
	}
	err = c.Replace(bar)
	checkErr(err, "replaced(foo): %v", err)

	// GetMulti: both previously stored keys must come back.
	m, err := c.GetMulti([]string{"foo", "bar"})
	checkErr(err, "GetMulti: %v", err)
	if g, e := len(m), 2; g != e {
		t.Errorf("GetMulti: got len(map) = %d, want = %d", g, e)
	}
	if _, ok := m["foo"]; !ok {
		t.Fatalf("GetMulti: didn't get key 'foo'")
	}
	if _, ok := m["bar"]; !ok {
		t.Fatalf("GetMulti: didn't get key 'bar'")
	}
	if g, e := string(m["foo"].Value), "fooval"; g != e {
		t.Errorf("GetMulti: foo: got %q, want %q", g, e)
	}
	if g, e := string(m["bar"].Value), "barval"; g != e {
		t.Errorf("GetMulti: bar: got %q, want %q", g, e)
	}

	// Delete: a deleted key must subsequently miss.
	err = c.Delete("foo")
	checkErr(err, "Delete: %v", err)
	it, err = c.Get("foo")
	if err != ErrCacheMiss {
		t.Errorf("post-Delete want ErrCacheMiss, got %v", err)
	}

	// Incr/Decr: arithmetic on a numeric value, miss after delete, and a
	// client error for a non-numeric value.
	mustSet(&Item{Key: "num", Value: []byte("42")})
	n, err := c.Increment("num", 8)
	checkErr(err, "Increment num + 8: %v", err)
	if n != 50 {
		t.Fatalf("Increment num + 8: want=50, got=%d", n)
	}
	n, err = c.Decrement("num", 49)
	checkErr(err, "Decrement: %v", err)
	if n != 1 {
		t.Fatalf("Decrement 49: want=1, got=%d", n)
	}
	err = c.Delete("num")
	checkErr(err, "delete num: %v", err)
	n, err = c.Increment("num", 1)
	if err != ErrCacheMiss {
		t.Fatalf("increment post-delete: want ErrCacheMiss, got %v", err)
	}
	mustSet(&Item{Key: "num", Value: []byte("not-numeric")})
	n, err = c.Increment("num", 1)
	if err == nil || !strings.Contains(err.Error(), "client error") {
		t.Fatalf("increment non-number: want client error, got %v", err)
	}
	testTouchWithClient(t, c)

	// Test Delete All
	err = c.DeleteAll()
	checkErr(err, "DeleteAll: %v", err)
	it, err = c.Get("bar")
	if err != ErrCacheMiss {
		t.Errorf("post-DeleteAll want ErrCacheMiss, got %v", err)
	}
}
||||
|
|
||||
|
// testTouchWithClient verifies that Touch extends an item's lifetime:
// two items expire in 2s, one is touched every second for 3s, after
// which the touched item must still be present and the other gone.
// Skipped under -short because it sleeps for several seconds.
func testTouchWithClient(t *testing.T, c *Client) {
	if testing.Short() {
		t.Log("Skipping testing memcache Touch with testing in Short mode")
		return
	}

	mustSet := mustSetF(t, c)

	const secondsToExpiry = int32(2)

	// We will set foo and bar to expire in 2 seconds, then we'll keep touching
	// foo every second
	// After 3 seconds, we expect foo to be available, and bar to be expired
	foo := &Item{Key: "foo", Value: []byte("fooval"), Expiration: secondsToExpiry}
	bar := &Item{Key: "bar", Value: []byte("barval"), Expiration: secondsToExpiry}

	setTime := time.Now()
	mustSet(foo)
	mustSet(bar)

	for s := 0; s < 3; s++ {
		time.Sleep(time.Duration(1 * time.Second))
		err := c.Touch(foo.Key, secondsToExpiry)
		if nil != err {
			t.Errorf("error touching foo: %v", err.Error())
		}
	}

	// foo was touched each second, so it must still be retrievable.
	_, err := c.Get("foo")
	if err != nil {
		if err == ErrCacheMiss {
			t.Fatalf("touching failed to keep item foo alive")
		} else {
			t.Fatalf("unexpected error retrieving foo after touching: %v", err.Error())
		}
	}

	// bar was never touched and must have expired by now.
	_, err = c.Get("bar")
	if nil == err {
		t.Fatalf("item bar did not expire within %v seconds", time.Now().Sub(setTime).Seconds())
	} else {
		if err != ErrCacheMiss {
			t.Fatalf("unexpected error retrieving bar: %v", err.Error())
		}
	}
}
@ -0,0 +1,114 @@ |
|||||
|
/* |
||||
|
Copyright 2011 Google Inc. |
||||
|
|
||||
|
Licensed under the Apache License, Version 2.0 (the "License"); |
||||
|
you may not use this file except in compliance with the License. |
||||
|
You may obtain a copy of the License at |
||||
|
|
||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
|
||||
|
Unless required by applicable law or agreed to in writing, software |
||||
|
distributed under the License is distributed on an "AS IS" BASIS, |
||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
|
See the License for the specific language governing permissions and |
||||
|
limitations under the License. |
||||
|
*/ |
||||
|
|
||||
|
package memcache |
||||
|
|
||||
|
import ( |
||||
|
"hash/crc32" |
||||
|
"net" |
||||
|
"strings" |
||||
|
"sync" |
||||
|
) |
||||
|
|
||||
|
// ServerSelector is the interface that selects a memcache server
// as a function of the item's key.
//
// All ServerSelector implementations must be safe for concurrent use
// by multiple goroutines.
type ServerSelector interface {
	// PickServer returns the server address that a given item
	// should be shared onto.
	PickServer(key string) (net.Addr, error)
	// Each iterates over every configured server, stopping at the
	// first error returned by f.
	Each(func(net.Addr) error) error
}
||||
|
|
||||
|
// ServerList is a simple ServerSelector. Its zero value is usable.
type ServerList struct {
	mu    sync.RWMutex // guards addrs
	addrs []net.Addr   // resolved server addresses; replaced wholesale by SetServers
}
||||
|
|
||||
|
// SetServers changes a ServerList's set of servers at runtime and is
|
||||
|
// safe for concurrent use by multiple goroutines.
|
||||
|
//
|
||||
|
// Each server is given equal weight. A server is given more weight
|
||||
|
// if it's listed multiple times.
|
||||
|
//
|
||||
|
// SetServers returns an error if any of the server names fail to
|
||||
|
// resolve. No attempt is made to connect to the server. If any error
|
||||
|
// is returned, no changes are made to the ServerList.
|
||||
|
func (ss *ServerList) SetServers(servers ...string) error { |
||||
|
naddr := make([]net.Addr, len(servers)) |
||||
|
for i, server := range servers { |
||||
|
if strings.Contains(server, "/") { |
||||
|
addr, err := net.ResolveUnixAddr("unix", server) |
||||
|
if err != nil { |
||||
|
return err |
||||
|
} |
||||
|
naddr[i] = addr |
||||
|
} else { |
||||
|
tcpaddr, err := net.ResolveTCPAddr("tcp", server) |
||||
|
if err != nil { |
||||
|
return err |
||||
|
} |
||||
|
naddr[i] = tcpaddr |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
ss.mu.Lock() |
||||
|
defer ss.mu.Unlock() |
||||
|
ss.addrs = naddr |
||||
|
return nil |
||||
|
} |
||||
|
|
||||
|
// Each iterates over each server calling the given function
|
||||
|
func (ss *ServerList) Each(f func(net.Addr) error) error { |
||||
|
ss.mu.RLock() |
||||
|
defer ss.mu.RUnlock() |
||||
|
for _, a := range ss.addrs { |
||||
|
if err := f(a); nil != err { |
||||
|
return err |
||||
|
} |
||||
|
} |
||||
|
return nil |
||||
|
} |
||||
|
|
||||
|
// keyBufPool returns []byte buffers for use by PickServer's call to
// crc32.ChecksumIEEE to avoid allocations. (but doesn't avoid the
// copies, which at least are bounded in size and small)
var keyBufPool = sync.Pool{
	New: func() interface{} {
		// 256 bytes covers memcached's maximum key length; longer keys
		// are hashed on their first 256 bytes only (see PickServer).
		b := make([]byte, 256)
		return &b
	},
}
||||
|
|
||||
|
func (ss *ServerList) PickServer(key string) (net.Addr, error) { |
||||
|
ss.mu.RLock() |
||||
|
defer ss.mu.RUnlock() |
||||
|
if len(ss.addrs) == 0 { |
||||
|
return nil, ErrNoServers |
||||
|
} |
||||
|
if len(ss.addrs) == 1 { |
||||
|
return ss.addrs[0], nil |
||||
|
} |
||||
|
bufp := keyBufPool.Get().(*[]byte) |
||||
|
n := copy(*bufp, key) |
||||
|
cs := crc32.ChecksumIEEE((*bufp)[:n]) |
||||
|
keyBufPool.Put(bufp) |
||||
|
|
||||
|
return ss.addrs[cs%uint32(len(ss.addrs))], nil |
||||
|
} |
@ -0,0 +1,39 @@ |
|||||
|
/* |
||||
|
Copyright 2014 Google Inc. |
||||
|
|
||||
|
Licensed under the Apache License, Version 2.0 (the "License"); |
||||
|
you may not use this file except in compliance with the License. |
||||
|
You may obtain a copy of the License at |
||||
|
|
||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
|
||||
|
Unless required by applicable law or agreed to in writing, software |
||||
|
distributed under the License is distributed on an "AS IS" BASIS, |
||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
|
See the License for the specific language governing permissions and |
||||
|
limitations under the License. |
||||
|
*/ |
||||
|
|
||||
|
package memcache |
||||
|
|
||||
|
import "testing" |
||||
|
|
||||
|
func BenchmarkPickServer(b *testing.B) { |
||||
|
// at least two to avoid 0 and 1 special cases:
|
||||
|
benchPickServer(b, "127.0.0.1:1234", "127.0.0.1:1235") |
||||
|
} |
||||
|
|
||||
|
func BenchmarkPickServer_Single(b *testing.B) { |
||||
|
benchPickServer(b, "127.0.0.1:1234") |
||||
|
} |
||||
|
|
||||
|
func benchPickServer(b *testing.B, servers ...string) { |
||||
|
b.ReportAllocs() |
||||
|
var ss ServerList |
||||
|
ss.SetServers(servers...) |
||||
|
for i := 0; i < b.N; i++ { |
||||
|
if _, err := ss.PickServer("some key"); err != nil { |
||||
|
b.Fatal(err) |
||||
|
} |
||||
|
} |
||||
|
} |
@ -0,0 +1,54 @@ |
|||||
|
// Copyright 2014 Gary Burd
|
||||
|
//
|
||||
|
// Licensed under the Apache License, Version 2.0 (the "License"): you may
|
||||
|
// not use this file except in compliance with the License. You may obtain
|
||||
|
// a copy of the License at
|
||||
|
//
|
||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
//
|
||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||
|
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
|
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
|
// License for the specific language governing permissions and limitations
|
||||
|
// under the License.
|
||||
|
|
||||
|
package internal // import "github.com/garyburd/redigo/internal"
|
||||
|
|
||||
|
import ( |
||||
|
"strings" |
||||
|
) |
||||
|
|
||||
|
// Connection-state bit flags. A CommandInfo records which of these a
// command sets or clears on the connection.
const (
	WatchState = 1 << iota
	MultiState
	SubscribeState
	MonitorState
)
||||
|
|
||||
|
// CommandInfo describes a command's effect on connection state: Set
// holds the state bits the command turns on, Clear the bits it clears.
type CommandInfo struct {
	Set, Clear int
}
||||
|
|
||||
|
// commandInfos maps upper-case command names to their state effects;
// init adds lower-case aliases for each entry.
var commandInfos = map[string]CommandInfo{
	"WATCH":      {Set: WatchState},
	"UNWATCH":    {Clear: WatchState},
	"MULTI":      {Set: MultiState},
	"EXEC":       {Clear: WatchState | MultiState},
	"DISCARD":    {Clear: WatchState | MultiState},
	"PSUBSCRIBE": {Set: SubscribeState},
	"SUBSCRIBE":  {Set: SubscribeState},
	"MONITOR":    {Set: MonitorState},
}
||||
|
|
||||
|
func init() { |
||||
|
for n, ci := range commandInfos { |
||||
|
commandInfos[strings.ToLower(n)] = ci |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func LookupCommandInfo(commandName string) CommandInfo { |
||||
|
if ci, ok := commandInfos[commandName]; ok { |
||||
|
return ci |
||||
|
} |
||||
|
return commandInfos[strings.ToUpper(commandName)] |
||||
|
} |
@ -0,0 +1,27 @@ |
|||||
|
package internal |
||||
|
|
||||
|
import "testing" |
||||
|
|
||||
|
func TestLookupCommandInfo(t *testing.T) { |
||||
|
for _, n := range []string{"watch", "WATCH", "wAtch"} { |
||||
|
if LookupCommandInfo(n) == (CommandInfo{}) { |
||||
|
t.Errorf("LookupCommandInfo(%q) = CommandInfo{}, expected non-zero value", n) |
||||
|
} |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func benchmarkLookupCommandInfo(b *testing.B, names ...string) { |
||||
|
for i := 0; i < b.N; i++ { |
||||
|
for _, c := range names { |
||||
|
LookupCommandInfo(c) |
||||
|
} |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func BenchmarkLookupCommandInfoCorrectCase(b *testing.B) { |
||||
|
benchmarkLookupCommandInfo(b, "watch", "WATCH", "monitor", "MONITOR") |
||||
|
} |
||||
|
|
||||
|
func BenchmarkLookupCommandInfoMixedCase(b *testing.B) { |
||||
|
benchmarkLookupCommandInfo(b, "wAtch", "WeTCH", "monItor", "MONiTOR") |
||||
|
} |
@ -0,0 +1,68 @@ |
|||||
|
// Copyright 2014 Gary Burd
|
||||
|
//
|
||||
|
// Licensed under the Apache License, Version 2.0 (the "License"): you may
|
||||
|
// not use this file except in compliance with the License. You may obtain
|
||||
|
// a copy of the License at
|
||||
|
//
|
||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
//
|
||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||
|
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
|
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
|
// License for the specific language governing permissions and limitations
|
||||
|
// under the License.
|
||||
|
|
||||
|
// Package redistest contains utilities for writing Redigo tests.
|
||||
|
package redistest |
||||
|
|
||||
|
import ( |
||||
|
"errors" |
||||
|
"time" |
||||
|
|
||||
|
"github.com/garyburd/redigo/redis" |
||||
|
) |
||||
|
|
||||
|
// testConn wraps a redis.Conn so that Close flushes test database 9
// before closing the underlying connection.
type testConn struct {
	redis.Conn
}
||||
|
|
||||
|
func (t testConn) Close() error { |
||||
|
_, err := t.Conn.Do("SELECT", "9") |
||||
|
if err != nil { |
||||
|
return nil |
||||
|
} |
||||
|
_, err = t.Conn.Do("FLUSHDB") |
||||
|
if err != nil { |
||||
|
return err |
||||
|
} |
||||
|
return t.Conn.Close() |
||||
|
} |
||||
|
|
||||
|
// Dial dials the local Redis server and selects database 9. To prevent
|
||||
|
// stomping on real data, DialTestDB fails if database 9 contains data. The
|
||||
|
// returned connection flushes database 9 on close.
|
||||
|
func Dial() (redis.Conn, error) { |
||||
|
c, err := redis.DialTimeout("tcp", ":6379", 0, 1*time.Second, 1*time.Second) |
||||
|
if err != nil { |
||||
|
return nil, err |
||||
|
} |
||||
|
|
||||
|
_, err = c.Do("SELECT", "9") |
||||
|
if err != nil { |
||||
|
c.Close() |
||||
|
return nil, err |
||||
|
} |
||||
|
|
||||
|
n, err := redis.Int(c.Do("DBSIZE")) |
||||
|
if err != nil { |
||||
|
c.Close() |
||||
|
return nil, err |
||||
|
} |
||||
|
|
||||
|
if n != 0 { |
||||
|
c.Close() |
||||
|
return nil, errors.New("database #9 is not empty, test can not continue") |
||||
|
} |
||||
|
|
||||
|
return testConn{c}, nil |
||||
|
} |
@ -0,0 +1,570 @@ |
|||||
|
// Copyright 2012 Gary Burd
|
||||
|
//
|
||||
|
// Licensed under the Apache License, Version 2.0 (the "License"): you may
|
||||
|
// not use this file except in compliance with the License. You may obtain
|
||||
|
// a copy of the License at
|
||||
|
//
|
||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
//
|
||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||
|
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
|
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
|
// License for the specific language governing permissions and limitations
|
||||
|
// under the License.
|
||||
|
|
||||
|
package redis |
||||
|
|
||||
|
import ( |
||||
|
"bufio" |
||||
|
"bytes" |
||||
|
"errors" |
||||
|
"fmt" |
||||
|
"io" |
||||
|
"net" |
||||
|
"net/url" |
||||
|
"regexp" |
||||
|
"strconv" |
||||
|
"sync" |
||||
|
"time" |
||||
|
) |
||||
|
|
||||
|
// conn is the low-level implementation of Conn
type conn struct {

	// Shared state: mu guards pending (commands sent but not yet read)
	// and err (the connection's terminal error, once fatal).
	mu      sync.Mutex
	pending int
	err     error
	conn    net.Conn

	// Read side: buffered reader over conn, with an optional per-reply
	// deadline.
	readTimeout time.Duration
	br          *bufio.Reader

	// Write side: buffered writer over conn, with an optional per-command
	// deadline.
	writeTimeout time.Duration
	bw           *bufio.Writer

	// Scratch space for formatting argument length.
	// '*' or '$', length, "\r\n"
	lenScratch [32]byte

	// Scratch space for formatting integers and floats.
	numScratch [40]byte
}
||||
|
|
||||
|
// DialTimeout acts like Dial but takes timeouts for establishing the
|
||||
|
// connection to the server, writing a command and reading a reply.
|
||||
|
//
|
||||
|
// Deprecated: Use Dial with options instead.
|
||||
|
func DialTimeout(network, address string, connectTimeout, readTimeout, writeTimeout time.Duration) (Conn, error) { |
||||
|
return Dial(network, address, |
||||
|
DialConnectTimeout(connectTimeout), |
||||
|
DialReadTimeout(readTimeout), |
||||
|
DialWriteTimeout(writeTimeout)) |
||||
|
} |
||||
|
|
||||
|
// DialOption specifies an option for dialing a Redis server.
type DialOption struct {
	// f applies the option to the accumulated dialOptions.
	f func(*dialOptions)
}
||||
|
|
||||
|
// dialOptions collects the settings applied by DialOption values before
// Dial establishes the connection.
type dialOptions struct {
	readTimeout  time.Duration
	writeTimeout time.Duration
	dial         func(network, addr string) (net.Conn, error)
	db           int
	password     string
}
||||
|
|
||||
|
// DialReadTimeout specifies the timeout for reading a single command reply.
|
||||
|
func DialReadTimeout(d time.Duration) DialOption { |
||||
|
return DialOption{func(do *dialOptions) { |
||||
|
do.readTimeout = d |
||||
|
}} |
||||
|
} |
||||
|
|
||||
|
// DialWriteTimeout specifies the timeout for writing a single command.
|
||||
|
func DialWriteTimeout(d time.Duration) DialOption { |
||||
|
return DialOption{func(do *dialOptions) { |
||||
|
do.writeTimeout = d |
||||
|
}} |
||||
|
} |
||||
|
|
||||
|
// DialConnectTimeout specifies the timeout for connecting to the Redis server.
|
||||
|
func DialConnectTimeout(d time.Duration) DialOption { |
||||
|
return DialOption{func(do *dialOptions) { |
||||
|
dialer := net.Dialer{Timeout: d} |
||||
|
do.dial = dialer.Dial |
||||
|
}} |
||||
|
} |
||||
|
|
||||
|
// DialNetDial specifies a custom dial function for creating TCP
|
||||
|
// connections. If this option is left out, then net.Dial is
|
||||
|
// used. DialNetDial overrides DialConnectTimeout.
|
||||
|
func DialNetDial(dial func(network, addr string) (net.Conn, error)) DialOption { |
||||
|
return DialOption{func(do *dialOptions) { |
||||
|
do.dial = dial |
||||
|
}} |
||||
|
} |
||||
|
|
||||
|
// DialDatabase specifies the database to select when dialing a connection.
|
||||
|
func DialDatabase(db int) DialOption { |
||||
|
return DialOption{func(do *dialOptions) { |
||||
|
do.db = db |
||||
|
}} |
||||
|
} |
||||
|
|
||||
|
// DialPassword specifies the password to use when connecting to
|
||||
|
// the Redis server.
|
||||
|
func DialPassword(password string) DialOption { |
||||
|
return DialOption{func(do *dialOptions) { |
||||
|
do.password = password |
||||
|
}} |
||||
|
} |
||||
|
|
||||
|
// Dial connects to the Redis server at the given network and
|
||||
|
// address using the specified options.
|
||||
|
func Dial(network, address string, options ...DialOption) (Conn, error) { |
||||
|
do := dialOptions{ |
||||
|
dial: net.Dial, |
||||
|
} |
||||
|
for _, option := range options { |
||||
|
option.f(&do) |
||||
|
} |
||||
|
|
||||
|
netConn, err := do.dial(network, address) |
||||
|
if err != nil { |
||||
|
return nil, err |
||||
|
} |
||||
|
c := &conn{ |
||||
|
conn: netConn, |
||||
|
bw: bufio.NewWriter(netConn), |
||||
|
br: bufio.NewReader(netConn), |
||||
|
readTimeout: do.readTimeout, |
||||
|
writeTimeout: do.writeTimeout, |
||||
|
} |
||||
|
|
||||
|
if do.password != "" { |
||||
|
if _, err := c.Do("AUTH", do.password); err != nil { |
||||
|
netConn.Close() |
||||
|
return nil, err |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
if do.db != 0 { |
||||
|
if _, err := c.Do("SELECT", do.db); err != nil { |
||||
|
netConn.Close() |
||||
|
return nil, err |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
return c, nil |
||||
|
} |
||||
|
|
||||
|
// pathDBRegexp matches a URL path consisting of "/" plus an optional
// decimal database number, as used by DialURL.
var pathDBRegexp = regexp.MustCompile(`/(\d*)\z`)
||||
|
|
||||
|
// DialURL connects to a Redis server at the given URL using the Redis
// URI scheme. URLs should follow the draft IANA specification for the
// scheme (https://www.iana.org/assignments/uri-schemes/prov/redis).
// A password in the URL's userinfo and a database number in the path
// are translated into DialPassword/DialDatabase options.
func DialURL(rawurl string, options ...DialOption) (Conn, error) {
	u, err := url.Parse(rawurl)
	if err != nil {
		return nil, err
	}

	if u.Scheme != "redis" {
		return nil, fmt.Errorf("invalid redis URL scheme: %s", u.Scheme)
	}

	// As per the IANA draft spec, the host defaults to localhost and
	// the port defaults to 6379.
	host, port, err := net.SplitHostPort(u.Host)
	if err != nil {
		// assume port is missing
		host = u.Host
		port = "6379"
	}
	if host == "" {
		host = "localhost"
	}
	address := net.JoinHostPort(host, port)

	// Only the password part of the userinfo is used; the username is
	// ignored (Redis AUTH takes a password only).
	if u.User != nil {
		password, isSet := u.User.Password()
		if isSet {
			options = append(options, DialPassword(password))
		}
	}

	// Path must be empty or "/<number>"; "/" alone selects database 0.
	match := pathDBRegexp.FindStringSubmatch(u.Path)
	if len(match) == 2 {
		db := 0
		if len(match[1]) > 0 {
			db, err = strconv.Atoi(match[1])
			if err != nil {
				return nil, fmt.Errorf("invalid database: %s", u.Path[1:])
			}
		}
		if db != 0 {
			options = append(options, DialDatabase(db))
		}
	} else if u.Path != "" {
		return nil, fmt.Errorf("invalid database: %s", u.Path[1:])
	}

	return Dial("tcp", address, options...)
}
||||
|
|
||||
|
// NewConn returns a new Redigo connection for the given net connection.
|
||||
|
func NewConn(netConn net.Conn, readTimeout, writeTimeout time.Duration) Conn { |
||||
|
return &conn{ |
||||
|
conn: netConn, |
||||
|
bw: bufio.NewWriter(netConn), |
||||
|
br: bufio.NewReader(netConn), |
||||
|
readTimeout: readTimeout, |
||||
|
writeTimeout: writeTimeout, |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func (c *conn) Close() error { |
||||
|
c.mu.Lock() |
||||
|
err := c.err |
||||
|
if c.err == nil { |
||||
|
c.err = errors.New("redigo: closed") |
||||
|
err = c.conn.Close() |
||||
|
} |
||||
|
c.mu.Unlock() |
||||
|
return err |
||||
|
} |
||||
|
|
||||
|
func (c *conn) fatal(err error) error { |
||||
|
c.mu.Lock() |
||||
|
if c.err == nil { |
||||
|
c.err = err |
||||
|
// Close connection to force errors on subsequent calls and to unblock
|
||||
|
// other reader or writer.
|
||||
|
c.conn.Close() |
||||
|
} |
||||
|
c.mu.Unlock() |
||||
|
return err |
||||
|
} |
||||
|
|
||||
|
func (c *conn) Err() error { |
||||
|
c.mu.Lock() |
||||
|
err := c.err |
||||
|
c.mu.Unlock() |
||||
|
return err |
||||
|
} |
||||
|
|
||||
|
// writeLen writes a RESP length header (e.g. "*3\r\n" or "$5\r\n") by
// formatting n's decimal digits right-to-left into lenScratch and
// emitting prefix + digits + CRLF in a single buffered write.
func (c *conn) writeLen(prefix byte, n int) error {
	// Terminator goes at the very end of the scratch buffer.
	c.lenScratch[len(c.lenScratch)-1] = '\n'
	c.lenScratch[len(c.lenScratch)-2] = '\r'
	i := len(c.lenScratch) - 3
	// Emit digits least-significant first, walking i backwards; runs at
	// least once so n == 0 produces "0".
	for {
		c.lenScratch[i] = byte('0' + n%10)
		i -= 1
		n = n / 10
		if n == 0 {
			break
		}
	}
	c.lenScratch[i] = prefix
	_, err := c.bw.Write(c.lenScratch[i:])
	return err
}
||||
|
|
||||
|
func (c *conn) writeString(s string) error { |
||||
|
c.writeLen('$', len(s)) |
||||
|
c.bw.WriteString(s) |
||||
|
_, err := c.bw.WriteString("\r\n") |
||||
|
return err |
||||
|
} |
||||
|
|
||||
|
func (c *conn) writeBytes(p []byte) error { |
||||
|
c.writeLen('$', len(p)) |
||||
|
c.bw.Write(p) |
||||
|
_, err := c.bw.WriteString("\r\n") |
||||
|
return err |
||||
|
} |
||||
|
|
||||
|
func (c *conn) writeInt64(n int64) error { |
||||
|
return c.writeBytes(strconv.AppendInt(c.numScratch[:0], n, 10)) |
||||
|
} |
||||
|
|
||||
|
func (c *conn) writeFloat64(n float64) error { |
||||
|
return c.writeBytes(strconv.AppendFloat(c.numScratch[:0], n, 'g', -1, 64)) |
||||
|
} |
||||
|
|
||||
|
// writeCommand writes cmd and its arguments as a RESP array of bulk
// strings. The first write error is kept and later arguments are
// skipped; the error is returned so the caller can mark the connection
// fatal.
func (c *conn) writeCommand(cmd string, args []interface{}) (err error) {
	c.writeLen('*', 1+len(args))
	err = c.writeString(cmd)
	for _, arg := range args {
		// Stop encoding once a write has failed.
		if err != nil {
			break
		}
		switch arg := arg.(type) {
		case string:
			err = c.writeString(arg)
		case []byte:
			err = c.writeBytes(arg)
		case int:
			err = c.writeInt64(int64(arg))
		case int64:
			err = c.writeInt64(arg)
		case float64:
			err = c.writeFloat64(arg)
		case bool:
			// Booleans are sent as "1"/"0".
			if arg {
				err = c.writeString("1")
			} else {
				err = c.writeString("0")
			}
		case nil:
			// nil becomes an empty bulk string.
			err = c.writeString("")
		default:
			// Fall back to fmt's default formatting for any other type.
			var buf bytes.Buffer
			fmt.Fprint(&buf, arg)
			err = c.writeBytes(buf.Bytes())
		}
	}
	return err
}
||||
|
|
||||
|
// protocolError reports a malformed or unexpected response read from
// the server.
type protocolError string

// Error implements the error interface, appending a hint about the
// likely cause to the message.
func (pe protocolError) Error() string {
	const hint = "possible server error or unsupported concurrent read by application"
	return fmt.Sprintf("redigo: %s (%s)", string(pe), hint)
}
||||
|
|
||||
|
func (c *conn) readLine() ([]byte, error) { |
||||
|
p, err := c.br.ReadSlice('\n') |
||||
|
if err == bufio.ErrBufferFull { |
||||
|
return nil, protocolError("long response line") |
||||
|
} |
||||
|
if err != nil { |
||||
|
return nil, err |
||||
|
} |
||||
|
i := len(p) - 2 |
||||
|
if i < 0 || p[i] != '\r' { |
||||
|
return nil, protocolError("bad response line terminator") |
||||
|
} |
||||
|
return p[:i], nil |
||||
|
} |
||||
|
|
||||
|
// parseLen parses bulk string and array lengths.
|
||||
|
func parseLen(p []byte) (int, error) { |
||||
|
if len(p) == 0 { |
||||
|
return -1, protocolError("malformed length") |
||||
|
} |
||||
|
|
||||
|
if p[0] == '-' && len(p) == 2 && p[1] == '1' { |
||||
|
// handle $-1 and $-1 null replies.
|
||||
|
return -1, nil |
||||
|
} |
||||
|
|
||||
|
var n int |
||||
|
for _, b := range p { |
||||
|
n *= 10 |
||||
|
if b < '0' || b > '9' { |
||||
|
return -1, protocolError("illegal bytes in length") |
||||
|
} |
||||
|
n += int(b - '0') |
||||
|
} |
||||
|
|
||||
|
return n, nil |
||||
|
} |
||||
|
|
||||
|
// parseInt parses an integer reply.
|
||||
|
func parseInt(p []byte) (interface{}, error) { |
||||
|
if len(p) == 0 { |
||||
|
return 0, protocolError("malformed integer") |
||||
|
} |
||||
|
|
||||
|
var negate bool |
||||
|
if p[0] == '-' { |
||||
|
negate = true |
||||
|
p = p[1:] |
||||
|
if len(p) == 0 { |
||||
|
return 0, protocolError("malformed integer") |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
var n int64 |
||||
|
for _, b := range p { |
||||
|
n *= 10 |
||||
|
if b < '0' || b > '9' { |
||||
|
return 0, protocolError("illegal bytes in length") |
||||
|
} |
||||
|
n += int64(b - '0') |
||||
|
} |
||||
|
|
||||
|
if negate { |
||||
|
n = -n |
||||
|
} |
||||
|
return n, nil |
||||
|
} |
||||
|
|
||||
|
// Shared interface values for the two most common status replies, so
// hot paths can return them without allocating a new interface value.
var (
	okReply   interface{} = "OK"
	pongReply interface{} = "PONG"
)
||||
|
|
||||
|
// readReply reads and decodes a single RESP reply, recursing for array
// elements. Status replies become strings, error replies become Error
// values (returned as the reply, not as err), integers become int64,
// bulk strings become []byte (nil for "$-1"), and arrays become
// []interface{} (nil for "*-1").
func (c *conn) readReply() (interface{}, error) {
	line, err := c.readLine()
	if err != nil {
		return nil, err
	}
	if len(line) == 0 {
		return nil, protocolError("short response line")
	}
	switch line[0] {
	case '+':
		switch {
		case len(line) == 3 && line[1] == 'O' && line[2] == 'K':
			// Avoid allocation for frequent "+OK" response.
			return okReply, nil
		case len(line) == 5 && line[1] == 'P' && line[2] == 'O' && line[3] == 'N' && line[4] == 'G':
			// Avoid allocation in PING command benchmarks :)
			return pongReply, nil
		default:
			return string(line[1:]), nil
		}
	case '-':
		// Server error reply: delivered as a value, not as err.
		return Error(string(line[1:])), nil
	case ':':
		return parseInt(line[1:])
	case '$':
		n, err := parseLen(line[1:])
		if n < 0 || err != nil {
			// n < 0 is the "$-1" null bulk string; err is nil then.
			return nil, err
		}
		p := make([]byte, n)
		_, err = io.ReadFull(c.br, p)
		if err != nil {
			return nil, err
		}
		// The payload must be followed by an empty line (the CRLF).
		if line, err := c.readLine(); err != nil {
			return nil, err
		} else if len(line) != 0 {
			return nil, protocolError("bad bulk string format")
		}
		return p, nil
	case '*':
		n, err := parseLen(line[1:])
		if n < 0 || err != nil {
			// n < 0 is the "*-1" null array; err is nil then.
			return nil, err
		}
		r := make([]interface{}, n)
		for i := range r {
			r[i], err = c.readReply()
			if err != nil {
				return nil, err
			}
		}
		return r, nil
	}
	return nil, protocolError("unexpected response line")
}
||||
|
|
||||
|
func (c *conn) Send(cmd string, args ...interface{}) error { |
||||
|
c.mu.Lock() |
||||
|
c.pending += 1 |
||||
|
c.mu.Unlock() |
||||
|
if c.writeTimeout != 0 { |
||||
|
c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout)) |
||||
|
} |
||||
|
if err := c.writeCommand(cmd, args); err != nil { |
||||
|
return c.fatal(err) |
||||
|
} |
||||
|
return nil |
||||
|
} |
||||
|
|
||||
|
func (c *conn) Flush() error { |
||||
|
if c.writeTimeout != 0 { |
||||
|
c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout)) |
||||
|
} |
||||
|
if err := c.bw.Flush(); err != nil { |
||||
|
return c.fatal(err) |
||||
|
} |
||||
|
return nil |
||||
|
} |
||||
|
|
||||
|
// Receive reads a single reply from the server, applying the read
// timeout when configured. A server error reply is converted into a
// non-nil err with a nil reply; any I/O or protocol error is fatal.
func (c *conn) Receive() (reply interface{}, err error) {
	if c.readTimeout != 0 {
		c.conn.SetReadDeadline(time.Now().Add(c.readTimeout))
	}
	if reply, err = c.readReply(); err != nil {
		return nil, c.fatal(err)
	}
	// When using pub/sub, the number of receives can be greater than the
	// number of sends. To enable normal use of the connection after
	// unsubscribing from all channels, we do not decrement pending to a
	// negative value.
	//
	// The pending field is decremented after the reply is read to handle the
	// case where Receive is called before Send.
	c.mu.Lock()
	if c.pending > 0 {
		c.pending -= 1
	}
	c.mu.Unlock()
	if err, ok := reply.(Error); ok {
		return nil, err
	}
	return
}
||||
|
|
||||
|
// Do combines Send, Flush and Receive: it writes cmd (unless cmd is ""),
// flushes the output buffer, then reads all replies pending from earlier
// Send calls plus the reply for cmd itself. The reply for cmd is returned;
// if any received reply is a Redis error, the first such error is returned.
// With cmd == "", Do only flushes and collects the pending replies.
func (c *conn) Do(cmd string, args ...interface{}) (interface{}, error) {
	// Snapshot and reset the count of outstanding Send calls.
	c.mu.Lock()
	pending := c.pending
	c.pending = 0
	c.mu.Unlock()

	// Nothing to write and nothing to read.
	if cmd == "" && pending == 0 {
		return nil, nil
	}

	if c.writeTimeout != 0 {
		c.conn.SetWriteDeadline(time.Now().Add(c.writeTimeout))
	}

	if cmd != "" {
		if err := c.writeCommand(cmd, args); err != nil {
			return nil, c.fatal(err)
		}
	}

	if err := c.bw.Flush(); err != nil {
		return nil, c.fatal(err)
	}

	if c.readTimeout != 0 {
		c.conn.SetReadDeadline(time.Now().Add(c.readTimeout))
	}

	if cmd == "" {
		// Blank command: return the replies for the pending Sends as a slice.
		reply := make([]interface{}, pending)
		for i := range reply {
			r, e := c.readReply()
			if e != nil {
				return nil, c.fatal(e)
			}
			reply[i] = r
		}
		return reply, nil
	}

	// Read pending replies plus one for cmd (hence <=). The last reply read
	// is cmd's result; the first Redis error reply, if any, wins as err.
	var err error
	var reply interface{}
	for i := 0; i <= pending; i++ {
		var e error
		if reply, e = c.readReply(); e != nil {
			return nil, c.fatal(e)
		}
		if e, ok := reply.(Error); ok && err == nil {
			err = e
		}
	}
	return reply, err
}
@ -0,0 +1,670 @@ |
|||||
|
// Copyright 2012 Gary Burd
|
||||
|
//
|
||||
|
// Licensed under the Apache License, Version 2.0 (the "License"): you may
|
||||
|
// not use this file except in compliance with the License. You may obtain
|
||||
|
// a copy of the License at
|
||||
|
//
|
||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
//
|
||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||
|
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
|
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
|
// License for the specific language governing permissions and limitations
|
||||
|
// under the License.
|
||||
|
|
||||
|
package redis_test |
||||
|
|
||||
|
import ( |
||||
|
"bytes" |
||||
|
"io" |
||||
|
"math" |
||||
|
"net" |
||||
|
"os" |
||||
|
"reflect" |
||||
|
"strings" |
||||
|
"testing" |
||||
|
"time" |
||||
|
|
||||
|
"github.com/garyburd/redigo/redis" |
||||
|
) |
||||
|
|
||||
|
// testConn is a fake net.Conn backed by an arbitrary reader and writer, so
// tests can script server replies and capture the bytes the client writes.
type testConn struct {
	io.Reader
	io.Writer
}

// The remaining net.Conn methods are no-ops for testing purposes.
func (*testConn) Close() error                       { return nil }
func (*testConn) LocalAddr() net.Addr                { return nil }
func (*testConn) RemoteAddr() net.Addr               { return nil }
func (*testConn) SetDeadline(t time.Time) error      { return nil }
func (*testConn) SetReadDeadline(t time.Time) error  { return nil }
func (*testConn) SetWriteDeadline(t time.Time) error { return nil }
||||
|
|
||||
|
func dialTestConn(r io.Reader, w io.Writer) redis.DialOption { |
||||
|
return redis.DialNetDial(func(net, addr string) (net.Conn, error) { |
||||
|
return &testConn{Reader: r, Writer: w}, nil |
||||
|
}) |
||||
|
} |
||||
|
|
||||
|
// writeTests maps command arguments to the exact RESP bytes the client must
// write, covering strings, integers, floats, nil, empty strings and bools.
var writeTests = []struct {
	args     []interface{}
	expected string
}{
	{
		[]interface{}{"SET", "key", "value"},
		"*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$5\r\nvalue\r\n",
	},
	// NOTE(review): this case duplicates the previous one — looks like a
	// copy/paste leftover; confirm before removing.
	{
		[]interface{}{"SET", "key", "value"},
		"*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$5\r\nvalue\r\n",
	},
	{
		[]interface{}{"SET", "key", byte(100)},
		"*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$3\r\n100\r\n",
	},
	{
		[]interface{}{"SET", "key", 100},
		"*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$3\r\n100\r\n",
	},
	{
		[]interface{}{"SET", "key", int64(math.MinInt64)},
		"*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$20\r\n-9223372036854775808\r\n",
	},
	{
		[]interface{}{"SET", "key", float64(1349673917.939762)},
		"*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$21\r\n1.349673917939762e+09\r\n",
	},
	{
		[]interface{}{"SET", "key", ""},
		"*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$0\r\n\r\n",
	},
	{
		[]interface{}{"SET", "key", nil},
		"*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$0\r\n\r\n",
	},
	{
		[]interface{}{"ECHO", true, false},
		"*3\r\n$4\r\nECHO\r\n$1\r\n1\r\n$1\r\n0\r\n",
	},
}
||||
|
|
||||
|
// TestWrite checks, for each writeTests case, that Send serializes the
// command into the exact expected RESP byte sequence.
func TestWrite(t *testing.T) {
	for _, tt := range writeTests {
		var buf bytes.Buffer
		// No reader is needed: only the written bytes are inspected.
		c, _ := redis.Dial("", "", dialTestConn(nil, &buf))
		err := c.Send(tt.args[0].(string), tt.args[1:]...)
		if err != nil {
			t.Errorf("Send(%v) returned error %v", tt.args, err)
			continue
		}
		c.Flush()
		actual := buf.String()
		if actual != tt.expected {
			t.Errorf("Send(%v) = %q, want %q", tt.args, actual, tt.expected)
		}
	}
}
||||
|
|
||||
|
// errorSentinel marks readTests entries whose reply must fail to decode;
// its pointer identity (not its value) is what TestRead compares against.
var errorSentinel = &struct{}{}

// readTests maps raw RESP reply bytes to the decoded Go value, or to
// errorSentinel when decoding is expected to fail.
var readTests = []struct {
	reply    string
	expected interface{}
}{
	{
		"+OK\r\n",
		"OK",
	},
	{
		"+PONG\r\n",
		"PONG",
	},
	{
		"@OK\r\n",
		errorSentinel,
	},
	{
		"$6\r\nfoobar\r\n",
		[]byte("foobar"),
	},
	{
		"$-1\r\n",
		nil,
	},
	{
		":1\r\n",
		int64(1),
	},
	{
		":-2\r\n",
		int64(-2),
	},
	{
		"*0\r\n",
		[]interface{}{},
	},
	{
		"*-1\r\n",
		nil,
	},
	{
		"*4\r\n$3\r\nfoo\r\n$3\r\nbar\r\n$5\r\nHello\r\n$5\r\nWorld\r\n",
		[]interface{}{[]byte("foo"), []byte("bar"), []byte("Hello"), []byte("World")},
	},
	{
		"*3\r\n$3\r\nfoo\r\n$-1\r\n$3\r\nbar\r\n",
		[]interface{}{[]byte("foo"), nil, []byte("bar")},
	},

	{
		// "x" is not a valid length
		"$x\r\nfoobar\r\n",
		errorSentinel,
	},
	{
		// -2 is not a valid length
		"$-2\r\n",
		errorSentinel,
	},
	{
		// "x" is not a valid integer
		":x\r\n",
		errorSentinel,
	},
	{
		// missing \r\n following value
		"$6\r\nfoobar",
		errorSentinel,
	},
	{
		// short value
		"$6\r\nxx",
		errorSentinel,
	},
	{
		// long value
		"$6\r\nfoobarx\r\n",
		errorSentinel,
	},
}
||||
|
|
||||
|
// TestRead checks, for each readTests case, that Receive decodes the scripted
// RESP reply into the expected value, or errors where errorSentinel says so.
func TestRead(t *testing.T) {
	for _, tt := range readTests {
		// The scripted reply is fed in as the server side of the connection.
		c, _ := redis.Dial("", "", dialTestConn(strings.NewReader(tt.reply), nil))
		actual, err := c.Receive()
		if tt.expected == errorSentinel {
			if err == nil {
				t.Errorf("Receive(%q) did not return expected error", tt.reply)
			}
		} else {
			if err != nil {
				t.Errorf("Receive(%q) returned error %v", tt.reply, err)
				continue
			}
			if !reflect.DeepEqual(actual, tt.expected) {
				t.Errorf("Receive(%q) = %v, want %v", tt.reply, actual, tt.expected)
			}
		}
	}
}
||||
|
|
||||
|
// testCommands is an order-sensitive script of commands and expected replies
// run against a live Redis server. The sequence builds up state (SET, LPUSH,
// INCR) and ends with a MULTI/EXEC transaction, so entries must not be
// reordered.
var testCommands = []struct {
	args     []interface{}
	expected interface{}
}{
	{
		[]interface{}{"PING"},
		"PONG",
	},
	{
		[]interface{}{"SET", "foo", "bar"},
		"OK",
	},
	{
		[]interface{}{"GET", "foo"},
		[]byte("bar"),
	},
	{
		[]interface{}{"GET", "nokey"},
		nil,
	},
	{
		[]interface{}{"MGET", "nokey", "foo"},
		[]interface{}{nil, []byte("bar")},
	},
	{
		[]interface{}{"INCR", "mycounter"},
		int64(1),
	},
	{
		[]interface{}{"LPUSH", "mylist", "foo"},
		int64(1),
	},
	{
		[]interface{}{"LPUSH", "mylist", "bar"},
		int64(2),
	},
	{
		[]interface{}{"LRANGE", "mylist", 0, -1},
		[]interface{}{[]byte("bar"), []byte("foo")},
	},
	{
		[]interface{}{"MULTI"},
		"OK",
	},
	// Inside MULTI, commands are queued and only executed at EXEC.
	{
		[]interface{}{"LRANGE", "mylist", 0, -1},
		"QUEUED",
	},
	{
		[]interface{}{"PING"},
		"QUEUED",
	},
	{
		[]interface{}{"EXEC"},
		[]interface{}{
			[]interface{}{[]byte("bar"), []byte("foo")},
			"PONG",
		},
	},
}
||||
|
|
||||
|
// TestDoCommands runs the testCommands script against a live server using
// one Do call per command. Requires a reachable default Redis server.
func TestDoCommands(t *testing.T) {
	c, err := redis.DialDefaultServer()
	if err != nil {
		t.Fatalf("error connection to database, %v", err)
	}
	defer c.Close()

	for _, cmd := range testCommands {
		actual, err := c.Do(cmd.args[0].(string), cmd.args[1:]...)
		if err != nil {
			t.Errorf("Do(%v) returned error %v", cmd.args, err)
			continue
		}
		if !reflect.DeepEqual(actual, cmd.expected) {
			t.Errorf("Do(%v) = %v, want %v", cmd.args, actual, cmd.expected)
		}
	}
}
||||
|
|
||||
|
// TestPipelineCommands runs the testCommands script as one pipeline: all
// Sends first, a single Flush, then one Receive per command, verifying each
// reply in order. Requires a reachable default Redis server.
func TestPipelineCommands(t *testing.T) {
	c, err := redis.DialDefaultServer()
	if err != nil {
		t.Fatalf("error connection to database, %v", err)
	}
	defer c.Close()

	for _, cmd := range testCommands {
		if err := c.Send(cmd.args[0].(string), cmd.args[1:]...); err != nil {
			t.Fatalf("Send(%v) returned error %v", cmd.args, err)
		}
	}
	if err := c.Flush(); err != nil {
		t.Errorf("Flush() returned error %v", err)
	}
	for _, cmd := range testCommands {
		actual, err := c.Receive()
		if err != nil {
			t.Fatalf("Receive(%v) returned error %v", cmd.args, err)
		}
		if !reflect.DeepEqual(actual, cmd.expected) {
			t.Errorf("Receive(%v) = %v, want %v", cmd.args, actual, cmd.expected)
		}
	}
}
||||
|
|
||||
|
// TestBlankCommmand pipelines the testCommands script with Send, then uses a
// blank Do("") to flush and collect all pending replies as a slice, checking
// each against the expected value. Requires a reachable default Redis server.
// NOTE(review): the name has a typo ("Commmand"); left unchanged because test
// names are matched by the testing framework and external -run filters.
func TestBlankCommmand(t *testing.T) {
	c, err := redis.DialDefaultServer()
	if err != nil {
		t.Fatalf("error connection to database, %v", err)
	}
	defer c.Close()

	for _, cmd := range testCommands {
		if err := c.Send(cmd.args[0].(string), cmd.args[1:]...); err != nil {
			t.Fatalf("Send(%v) returned error %v", cmd.args, err)
		}
	}
	// Do("") flushes and returns one reply per pending Send.
	reply, err := redis.Values(c.Do(""))
	if err != nil {
		t.Fatalf("Do() returned error %v", err)
	}
	if len(reply) != len(testCommands) {
		t.Fatalf("len(reply)=%d, want %d", len(reply), len(testCommands))
	}
	for i, cmd := range testCommands {
		actual := reply[i]
		if !reflect.DeepEqual(actual, cmd.expected) {
			t.Errorf("Receive(%v) = %v, want %v", cmd.args, actual, cmd.expected)
		}
	}
}
||||
|
|
||||
|
// TestRecvBeforeSend checks that a Receive issued before the matching Send
// does not corrupt the connection's pending-reply accounting: after the
// blocked Receive completes, a blank Do("") must succeed.
// Requires a reachable default Redis server.
func TestRecvBeforeSend(t *testing.T) {
	c, err := redis.DialDefaultServer()
	if err != nil {
		t.Fatalf("error connection to database, %v", err)
	}
	defer c.Close()
	done := make(chan struct{})
	go func() {
		// Blocks until the PING reply below arrives.
		c.Receive()
		close(done)
	}()
	// Give the goroutine a chance to block in Receive first.
	time.Sleep(time.Millisecond)
	c.Send("PING")
	c.Flush()
	<-done
	_, err = c.Do("")
	if err != nil {
		t.Fatalf("error=%v", err)
	}
}
||||
|
|
||||
|
// TestError verifies that a Redis error reply (HSET on a string key) is
// returned from Do but does NOT mark the connection broken: Err() stays nil
// and the connection remains usable. Requires a reachable default server.
func TestError(t *testing.T) {
	c, err := redis.DialDefaultServer()
	if err != nil {
		t.Fatalf("error connection to database, %v", err)
	}
	defer c.Close()

	c.Do("SET", "key", "val")
	// Wrong-type operation: server replies with an error.
	_, err = c.Do("HSET", "key", "fld", "val")
	if err == nil {
		t.Errorf("Expected err for HSET on string key.")
	}
	if c.Err() != nil {
		t.Errorf("Conn has Err()=%v, expect nil", c.Err())
	}
	// The connection must still work after the error reply.
	_, err = c.Do("SET", "key", "val")
	if err != nil {
		t.Errorf("Do(SET, key, val) returned error %v, expected nil.", err)
	}
}
||||
|
|
||||
|
// TestReadTimeout verifies that DialReadTimeout causes both Do and
// Send/Flush/Receive to fail when the server is slow (the stub server delays
// its "+OK" reply by one second, far past the millisecond timeout), and that
// the timeout marks the connection as broken (Err() != nil).
func TestReadTimeout(t *testing.T) {
	l, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		t.Fatalf("net.Listen returned %v", err)
	}
	defer l.Close()

	// Stub server: answer every accepted connection with "+OK" after a
	// one-second delay, then close it.
	go func() {
		for {
			c, err := l.Accept()
			if err != nil {
				return
			}
			go func() {
				time.Sleep(time.Second)
				c.Write([]byte("+OK\r\n"))
				c.Close()
			}()
		}
	}()

	// Do

	c1, err := redis.Dial(l.Addr().Network(), l.Addr().String(), redis.DialReadTimeout(time.Millisecond))
	if err != nil {
		t.Fatalf("redis.Dial returned %v", err)
	}
	defer c1.Close()

	_, err = c1.Do("PING")
	if err == nil {
		t.Fatalf("c1.Do() returned nil, expect error")
	}
	if c1.Err() == nil {
		t.Fatalf("c1.Err() = nil, expect error")
	}

	// Send/Flush/Receive

	c2, err := redis.Dial(l.Addr().Network(), l.Addr().String(), redis.DialReadTimeout(time.Millisecond))
	if err != nil {
		t.Fatalf("redis.Dial returned %v", err)
	}
	defer c2.Close()

	c2.Send("PING")
	c2.Flush()
	_, err = c2.Receive()
	if err == nil {
		t.Fatalf("c2.Receive() returned nil, expect error")
	}
	if c2.Err() == nil {
		t.Fatalf("c2.Err() = nil, expect error")
	}
}
||||
|
|
||||
|
// dialErrors lists URLs that DialURL must reject, paired with a substring
// the returned error message must contain ("" means any non-nil error).
var dialErrors = []struct {
	rawurl        string
	expectedError string
}{
	{
		"localhost",
		"invalid redis URL scheme",
	},
	// The error message for invalid hosts is different in different
	// versions of Go, so just check that there is an error message.
	{
		"redis://weird url",
		"",
	},
	{
		"redis://foo:bar:baz",
		"",
	},
	{
		"http://www.google.com",
		"invalid redis URL scheme: http",
	},
	{
		"redis://localhost:6379/abc123",
		"invalid database: abc123",
	},
}
||||
|
|
||||
|
// TestDialURLErrors checks that DialURL rejects each malformed URL in
// dialErrors with an error containing the expected substring.
func TestDialURLErrors(t *testing.T) {
	for _, d := range dialErrors {
		_, err := redis.DialURL(d.rawurl)
		if err == nil || !strings.Contains(err.Error(), d.expectedError) {
			t.Errorf("DialURL did not return expected error (expected %v to contain %s)", err, d.expectedError)
		}
	}
}
||||
|
|
||||
|
func TestDialURLPort(t *testing.T) { |
||||
|
checkPort := func(network, address string) (net.Conn, error) { |
||||
|
if address != "localhost:6379" { |
||||
|
t.Errorf("DialURL did not set port to 6379 by default (got %v)", address) |
||||
|
} |
||||
|
return nil, nil |
||||
|
} |
||||
|
_, err := redis.DialURL("redis://localhost", redis.DialNetDial(checkPort)) |
||||
|
if err != nil { |
||||
|
t.Error("dial error:", err) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
// TestDialURLHost checks that DialURL fills in the default host (localhost)
// when the URL specifies only a port.
func TestDialURLHost(t *testing.T) {
	checkHost := func(network, address string) (net.Conn, error) {
		if address != "localhost:6379" {
			t.Errorf("DialURL did not set host to localhost by default (got %v)", address)
		}
		return nil, nil
	}
	_, err := redis.DialURL("redis://:6379", redis.DialNetDial(checkHost))
	if err != nil {
		t.Error("dial error:", err)
	}
}
||||
|
|
||||
|
// TestDialURLPassword checks that a password in the URL's userinfo triggers
// an AUTH command, by capturing the bytes written during dialing.
func TestDialURLPassword(t *testing.T) {
	var buf bytes.Buffer
	// Scripted "+OK" is the server's reply to AUTH.
	_, err := redis.DialURL("redis://x:abc123@localhost", dialTestConn(strings.NewReader("+OK\r\n"), &buf))
	if err != nil {
		t.Error("dial error:", err)
	}
	expected := "*2\r\n$4\r\nAUTH\r\n$6\r\nabc123\r\n"
	actual := buf.String()
	if actual != expected {
		t.Errorf("commands = %q, want %q", actual, expected)
	}
}
||||
|
|
||||
|
// TestDialURLDatabase checks that a database number in the URL path triggers
// a SELECT command, and that an empty path sends nothing (database 0).
func TestDialURLDatabase(t *testing.T) {
	var buf3 bytes.Buffer
	_, err3 := redis.DialURL("redis://localhost/3", dialTestConn(strings.NewReader("+OK\r\n"), &buf3))
	if err3 != nil {
		t.Error("dial error:", err3)
	}
	expected3 := "*2\r\n$6\r\nSELECT\r\n$1\r\n3\r\n"
	actual3 := buf3.String()
	if actual3 != expected3 {
		t.Errorf("commands = %q, want %q", actual3, expected3)
	}
	// empty DB means 0
	var buf0 bytes.Buffer
	_, err0 := redis.DialURL("redis://localhost/", dialTestConn(strings.NewReader("+OK\r\n"), &buf0))
	if err0 != nil {
		t.Error("dial error:", err0)
	}
	expected0 := ""
	actual0 := buf0.String()
	if actual0 != expected0 {
		t.Errorf("commands = %q, want %q", actual0, expected0)
	}
}
||||
|
|
||||
|
// ExampleDial shows how to connect to a local instance of Redis running on
// the default port.
func ExampleDial() {
	c, err := redis.Dial("tcp", ":6379")
	if err != nil {
		// handle error
	}
	defer c.Close()
}
||||
|
|
||||
|
// ExampleDialURL shows how to connect to a remote instance of Redis using a
// URL taken from the environment.
func ExampleDialURL() {
	c, err := redis.DialURL(os.Getenv("REDIS_URL"))
	if err != nil {
		// handle connection error
	}
	defer c.Close()
}
||||
|
|
||||
|
// TextExecError tests handling of errors in a transaction. See
|
||||
|
// http://redis.io/topics/transactions for information on how Redis handles
|
||||
|
// errors in a transaction.
|
||||
|
func TestExecError(t *testing.T) { |
||||
|
c, err := redis.DialDefaultServer() |
||||
|
if err != nil { |
||||
|
t.Fatalf("error connection to database, %v", err) |
||||
|
} |
||||
|
defer c.Close() |
||||
|
|
||||
|
// Execute commands that fail before EXEC is called.
|
||||
|
|
||||
|
c.Do("DEL", "k0") |
||||
|
c.Do("ZADD", "k0", 0, 0) |
||||
|
c.Send("MULTI") |
||||
|
c.Send("NOTACOMMAND", "k0", 0, 0) |
||||
|
c.Send("ZINCRBY", "k0", 0, 0) |
||||
|
v, err := c.Do("EXEC") |
||||
|
if err == nil { |
||||
|
t.Fatalf("EXEC returned values %v, expected error", v) |
||||
|
} |
||||
|
|
||||
|
// Execute commands that fail after EXEC is called. The first command
|
||||
|
// returns an error.
|
||||
|
|
||||
|
c.Do("DEL", "k1") |
||||
|
c.Do("ZADD", "k1", 0, 0) |
||||
|
c.Send("MULTI") |
||||
|
c.Send("HSET", "k1", 0, 0) |
||||
|
c.Send("ZINCRBY", "k1", 0, 0) |
||||
|
v, err = c.Do("EXEC") |
||||
|
if err != nil { |
||||
|
t.Fatalf("EXEC returned error %v", err) |
||||
|
} |
||||
|
|
||||
|
vs, err := redis.Values(v, nil) |
||||
|
if err != nil { |
||||
|
t.Fatalf("Values(v) returned error %v", err) |
||||
|
} |
||||
|
|
||||
|
if len(vs) != 2 { |
||||
|
t.Fatalf("len(vs) == %d, want 2", len(vs)) |
||||
|
} |
||||
|
|
||||
|
if _, ok := vs[0].(error); !ok { |
||||
|
t.Fatalf("first result is type %T, expected error", vs[0]) |
||||
|
} |
||||
|
|
||||
|
if _, ok := vs[1].([]byte); !ok { |
||||
|
t.Fatalf("second result is type %T, expected []byte", vs[1]) |
||||
|
} |
||||
|
|
||||
|
// Execute commands that fail after EXEC is called. The second command
|
||||
|
// returns an error.
|
||||
|
|
||||
|
c.Do("ZADD", "k2", 0, 0) |
||||
|
c.Send("MULTI") |
||||
|
c.Send("ZINCRBY", "k2", 0, 0) |
||||
|
c.Send("HSET", "k2", 0, 0) |
||||
|
v, err = c.Do("EXEC") |
||||
|
if err != nil { |
||||
|
t.Fatalf("EXEC returned error %v", err) |
||||
|
} |
||||
|
|
||||
|
vs, err = redis.Values(v, nil) |
||||
|
if err != nil { |
||||
|
t.Fatalf("Values(v) returned error %v", err) |
||||
|
} |
||||
|
|
||||
|
if len(vs) != 2 { |
||||
|
t.Fatalf("len(vs) == %d, want 2", len(vs)) |
||||
|
} |
||||
|
|
||||
|
if _, ok := vs[0].([]byte); !ok { |
||||
|
t.Fatalf("first result is type %T, expected []byte", vs[0]) |
||||
|
} |
||||
|
|
||||
|
if _, ok := vs[1].(error); !ok { |
||||
|
t.Fatalf("second result is type %T, expected error", vs[2]) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
// BenchmarkDoEmpty measures the per-call overhead of a blank Do("") with no
// pending commands. Requires a reachable default Redis server.
func BenchmarkDoEmpty(b *testing.B) {
	b.StopTimer()
	c, err := redis.DialDefaultServer()
	if err != nil {
		b.Fatal(err)
	}
	defer c.Close()
	b.StartTimer()
	for i := 0; i < b.N; i++ {
		if _, err := c.Do(""); err != nil {
			b.Fatal(err)
		}
	}
}
||||
|
|
||||
|
// BenchmarkDoPing measures the round-trip cost of Do("PING") against a live
// server. Requires a reachable default Redis server.
func BenchmarkDoPing(b *testing.B) {
	b.StopTimer()
	c, err := redis.DialDefaultServer()
	if err != nil {
		b.Fatal(err)
	}
	defer c.Close()
	b.StartTimer()
	for i := 0; i < b.N; i++ {
		if _, err := c.Do("PING"); err != nil {
			b.Fatal(err)
		}
	}
}
@ -0,0 +1,168 @@ |
|||||
|
// Copyright 2012 Gary Burd
|
||||
|
//
|
||||
|
// Licensed under the Apache License, Version 2.0 (the "License"): you may
|
||||
|
// not use this file except in compliance with the License. You may obtain
|
||||
|
// a copy of the License at
|
||||
|
//
|
||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
//
|
||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||
|
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
|
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
|
// License for the specific language governing permissions and limitations
|
||||
|
// under the License.
|
||||
|
|
||||
|
// Package redis is a client for the Redis database.
|
||||
|
//
|
||||
|
// The Redigo FAQ (https://github.com/garyburd/redigo/wiki/FAQ) contains more
|
||||
|
// documentation about this package.
|
||||
|
//
|
||||
|
// Connections
|
||||
|
//
|
||||
|
// The Conn interface is the primary interface for working with Redis.
|
||||
|
// Applications create connections by calling the Dial, DialWithTimeout or
|
||||
|
// NewConn functions. In the future, functions will be added for creating
|
||||
|
// sharded and other types of connections.
|
||||
|
//
|
||||
|
// The application must call the connection Close method when the application
|
||||
|
// is done with the connection.
|
||||
|
//
|
||||
|
// Executing Commands
|
||||
|
//
|
||||
|
// The Conn interface has a generic method for executing Redis commands:
|
||||
|
//
|
||||
|
// Do(commandName string, args ...interface{}) (reply interface{}, err error)
|
||||
|
//
|
||||
|
// The Redis command reference (http://redis.io/commands) lists the available
|
||||
|
// commands. An example of using the Redis APPEND command is:
|
||||
|
//
|
||||
|
// n, err := conn.Do("APPEND", "key", "value")
|
||||
|
//
|
||||
|
// The Do method converts command arguments to binary strings for transmission
|
||||
|
// to the server as follows:
|
||||
|
//
|
||||
|
// Go Type Conversion
|
||||
|
// []byte Sent as is
|
||||
|
// string Sent as is
|
||||
|
// int, int64 strconv.FormatInt(v)
|
||||
|
// float64 strconv.FormatFloat(v, 'g', -1, 64)
|
||||
|
// bool true -> "1", false -> "0"
|
||||
|
// nil ""
|
||||
|
// all other types fmt.Print(v)
|
||||
|
//
|
||||
|
// Redis command reply types are represented using the following Go types:
|
||||
|
//
|
||||
|
// Redis type Go type
|
||||
|
// error redis.Error
|
||||
|
// integer int64
|
||||
|
// simple string string
|
||||
|
// bulk string []byte or nil if value not present.
|
||||
|
// array []interface{} or nil if value not present.
|
||||
|
//
|
||||
|
// Use type assertions or the reply helper functions to convert from
|
||||
|
// interface{} to the specific Go type for the command result.
|
||||
|
//
|
||||
|
// Pipelining
|
||||
|
//
|
||||
|
// Connections support pipelining using the Send, Flush and Receive methods.
|
||||
|
//
|
||||
|
// Send(commandName string, args ...interface{}) error
|
||||
|
// Flush() error
|
||||
|
// Receive() (reply interface{}, err error)
|
||||
|
//
|
||||
|
// Send writes the command to the connection's output buffer. Flush flushes the
|
||||
|
// connection's output buffer to the server. Receive reads a single reply from
|
||||
|
// the server. The following example shows a simple pipeline.
|
||||
|
//
|
||||
|
// c.Send("SET", "foo", "bar")
|
||||
|
// c.Send("GET", "foo")
|
||||
|
// c.Flush()
|
||||
|
// c.Receive() // reply from SET
|
||||
|
// v, err = c.Receive() // reply from GET
|
||||
|
//
|
||||
|
// The Do method combines the functionality of the Send, Flush and Receive
|
||||
|
// methods. The Do method starts by writing the command and flushing the output
|
||||
|
// buffer. Next, the Do method receives all pending replies including the reply
|
||||
|
// for the command just sent by Do. If any of the received replies is an error,
|
||||
|
// then Do returns the error. If there are no errors, then Do returns the last
|
||||
|
// reply. If the command argument to the Do method is "", then the Do method
|
||||
|
// will flush the output buffer and receive pending replies without sending a
|
||||
|
// command.
|
||||
|
//
|
||||
|
// Use the Send and Do methods to implement pipelined transactions.
|
||||
|
//
|
||||
|
// c.Send("MULTI")
|
||||
|
// c.Send("INCR", "foo")
|
||||
|
// c.Send("INCR", "bar")
|
||||
|
// r, err := c.Do("EXEC")
|
||||
|
// fmt.Println(r) // prints [1, 1]
|
||||
|
//
|
||||
|
// Concurrency
|
||||
|
//
|
||||
|
// Connections support one concurrent caller to the Receive method and one
|
||||
|
// concurrent caller to the Send and Flush methods. No other concurrency is
|
||||
|
// supported including concurrent calls to the Do method.
|
||||
|
//
|
||||
|
// For full concurrent access to Redis, use the thread-safe Pool to get, use
|
||||
|
// and release a connection from within a goroutine. Connections returned from
|
||||
|
// a Pool have the concurrency restrictions described in the previous
|
||||
|
// paragraph.
|
||||
|
//
|
||||
|
// Publish and Subscribe
|
||||
|
//
|
||||
|
// Use the Send, Flush and Receive methods to implement Pub/Sub subscribers.
|
||||
|
//
|
||||
|
// c.Send("SUBSCRIBE", "example")
|
||||
|
// c.Flush()
|
||||
|
// for {
|
||||
|
// reply, err := c.Receive()
|
||||
|
// if err != nil {
|
||||
|
// return err
|
||||
|
// }
|
||||
|
// // process pushed message
|
||||
|
// }
|
||||
|
//
|
||||
|
// The PubSubConn type wraps a Conn with convenience methods for implementing
|
||||
|
// subscribers. The Subscribe, PSubscribe, Unsubscribe and PUnsubscribe methods
|
||||
|
// send and flush a subscription management command. The receive method
|
||||
|
// converts a pushed message to convenient types for use in a type switch.
|
||||
|
//
|
||||
|
// psc := redis.PubSubConn{c}
|
||||
|
// psc.Subscribe("example")
|
||||
|
// for {
|
||||
|
// switch v := psc.Receive().(type) {
|
||||
|
// case redis.Message:
|
||||
|
// fmt.Printf("%s: message: %s\n", v.Channel, v.Data)
|
||||
|
// case redis.Subscription:
|
||||
|
// fmt.Printf("%s: %s %d\n", v.Channel, v.Kind, v.Count)
|
||||
|
// case error:
|
||||
|
// return v
|
||||
|
// }
|
||||
|
// }
|
||||
|
//
|
||||
|
// Reply Helpers
|
||||
|
//
|
||||
|
// The Bool, Int, Bytes, String, Strings and Values functions convert a reply
|
||||
|
// to a value of a specific type. To allow convenient wrapping of calls to the
|
||||
|
// connection Do and Receive methods, the functions take a second argument of
|
||||
|
// type error. If the error is non-nil, then the helper function returns the
|
||||
|
// error. If the error is nil, the function converts the reply to the specified
|
||||
|
// type:
|
||||
|
//
|
||||
|
// exists, err := redis.Bool(c.Do("EXISTS", "foo"))
|
||||
|
// if err != nil {
|
||||
|
// // handle error return from c.Do or type conversion error.
|
||||
|
// }
|
||||
|
//
|
||||
|
// The Scan function converts elements of a array reply to Go types:
|
||||
|
//
|
||||
|
// var value1 int
|
||||
|
// var value2 string
|
||||
|
// reply, err := redis.Values(c.Do("MGET", "key1", "key2"))
|
||||
|
// if err != nil {
|
||||
|
// // handle error
|
||||
|
// }
|
||||
|
// if _, err := redis.Scan(reply, &value1, &value2); err != nil {
|
||||
|
// // handle error
|
||||
|
// }
|
||||
|
package redis // import "github.com/garyburd/redigo/redis"
|
@ -0,0 +1,117 @@ |
|||||
|
// Copyright 2012 Gary Burd
|
||||
|
//
|
||||
|
// Licensed under the Apache License, Version 2.0 (the "License"): you may
|
||||
|
// not use this file except in compliance with the License. You may obtain
|
||||
|
// a copy of the License at
|
||||
|
//
|
||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
//
|
||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||
|
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
|
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
|
// License for the specific language governing permissions and limitations
|
||||
|
// under the License.
|
||||
|
|
||||
|
package redis |
||||
|
|
||||
|
import ( |
||||
|
"bytes" |
||||
|
"fmt" |
||||
|
"log" |
||||
|
) |
||||
|
|
||||
|
// NewLoggingConn returns a logging wrapper around a connection.
|
||||
|
func NewLoggingConn(conn Conn, logger *log.Logger, prefix string) Conn { |
||||
|
if prefix != "" { |
||||
|
prefix = prefix + "." |
||||
|
} |
||||
|
return &loggingConn{conn, logger, prefix} |
||||
|
} |
||||
|
|
||||
|
// loggingConn wraps a Conn, logging every Do, Send, Receive and Close call
// to logger with the given prefix. The embedded Conn supplies all other
// methods unchanged.
type loggingConn struct {
	Conn
	logger *log.Logger // destination for formatted call traces
	prefix string      // prepended to each logged method name
}
||||
|
|
||||
|
// Close closes the underlying connection and logs the call and its result.
func (c *loggingConn) Close() error {
	err := c.Conn.Close()
	var buf bytes.Buffer
	fmt.Fprintf(&buf, "%sClose() -> (%v)", c.prefix, err)
	// calldepth 2 so the log records the caller of Close, not this wrapper.
	c.logger.Output(2, buf.String())
	return err
}
||||
|
|
||||
|
// printValue writes a compact, human-readable rendering of a command
// argument or reply value to buf, truncating long strings/byte slices and
// long arrays to chop elements (marked with "...").
func (c *loggingConn) printValue(buf *bytes.Buffer, v interface{}) {
	const chop = 32
	switch v := v.(type) {
	case []byte:
		if len(v) > chop {
			fmt.Fprintf(buf, "%q...", v[:chop])
		} else {
			fmt.Fprintf(buf, "%q", v)
		}
	case string:
		if len(v) > chop {
			fmt.Fprintf(buf, "%q...", v[:chop])
		} else {
			fmt.Fprintf(buf, "%q", v)
		}
	case []interface{}:
		if len(v) == 0 {
			buf.WriteString("[]")
		} else {
			sep := "["
			fin := "]"
			if len(v) > chop {
				v = v[:chop]
				fin = "...]"
			}
			// Recurse for nested values (e.g. nested array replies).
			for _, vv := range v {
				buf.WriteString(sep)
				c.printValue(buf, vv)
				sep = ", "
			}
			buf.WriteString(fin)
		}
	default:
		fmt.Fprint(buf, v)
	}
}
||||
|
|
||||
|
// print logs one method call in the form "prefixMethod(cmd, args...) ->
// (reply, err)". Receive has no arguments to print, and Send has no reply.
func (c *loggingConn) print(method, commandName string, args []interface{}, reply interface{}, err error) {
	var buf bytes.Buffer
	fmt.Fprintf(&buf, "%s%s(", c.prefix, method)
	if method != "Receive" {
		buf.WriteString(commandName)
		for _, arg := range args {
			buf.WriteString(", ")
			c.printValue(&buf, arg)
		}
	}
	buf.WriteString(") -> (")
	if method != "Send" {
		c.printValue(&buf, reply)
		buf.WriteString(", ")
	}
	fmt.Fprintf(&buf, "%v)", err)
	// calldepth 3: skip print and the Do/Send/Receive wrapper so the log
	// records the original caller.
	c.logger.Output(3, buf.String())
}
||||
|
|
||||
|
// Do delegates to the wrapped connection and logs the call and result.
func (c *loggingConn) Do(commandName string, args ...interface{}) (interface{}, error) {
	reply, err := c.Conn.Do(commandName, args...)
	c.print("Do", commandName, args, reply, err)
	return reply, err
}
||||
|
|
||||
|
func (c *loggingConn) Send(commandName string, args ...interface{}) error { |
||||
|
err := c.Conn.Send(commandName, args...) |
||||
|
c.print("Send", commandName, args, nil, err) |
||||
|
return err |
||||
|
} |
||||
|
|
||||
|
func (c *loggingConn) Receive() (interface{}, error) { |
||||
|
reply, err := c.Conn.Receive() |
||||
|
c.print("Receive", "", nil, reply, err) |
||||
|
return reply, err |
||||
|
} |
@ -0,0 +1,397 @@ |
|||||
|
// Copyright 2012 Gary Burd
|
||||
|
//
|
||||
|
// Licensed under the Apache License, Version 2.0 (the "License"): you may
|
||||
|
// not use this file except in compliance with the License. You may obtain
|
||||
|
// a copy of the License at
|
||||
|
//
|
||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
//
|
||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||
|
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
|
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
|
// License for the specific language governing permissions and limitations
|
||||
|
// under the License.
|
||||
|
|
||||
|
package redis |
||||
|
|
||||
|
import ( |
||||
|
"bytes" |
||||
|
"container/list" |
||||
|
"crypto/rand" |
||||
|
"crypto/sha1" |
||||
|
"errors" |
||||
|
"io" |
||||
|
"strconv" |
||||
|
"sync" |
||||
|
"time" |
||||
|
|
||||
|
"github.com/garyburd/redigo/internal" |
||||
|
) |
||||
|
|
||||
|
var nowFunc = time.Now // for testing
|
||||
|
|
||||
|
// ErrPoolExhausted is returned from a pool connection method (Do, Send,
|
||||
|
// Receive, Flush, Err) when the maximum number of database connections in the
|
||||
|
// pool has been reached.
|
||||
|
var ErrPoolExhausted = errors.New("redigo: connection pool exhausted")

var (
	// errPoolClosed is the pool-closed sentinel.
	// NOTE(review): not referenced within this chunk; presumably used
	// elsewhere in the package — verify before removing.
	errPoolClosed = errors.New("redigo: connection pool closed")
	// errConnClosed is returned by a pooled connection's methods after the
	// connection has been returned to the pool (see pooledConnection.Close).
	errConnClosed = errors.New("redigo: connection closed")
)
||||
|
|
||||
|
// Pool maintains a pool of connections. The application calls the Get method
|
||||
|
// to get a connection from the pool and the connection's Close method to
|
||||
|
// return the connection's resources to the pool.
|
||||
|
//
|
||||
|
// The following example shows how to use a pool in a web application. The
|
||||
|
// application creates a pool at application startup and makes it available to
|
||||
|
// request handlers using a global variable. The pool configuration used here
|
||||
|
// is an example, not a recommendation.
|
||||
|
//
|
||||
|
// func newPool(server, password string) *redis.Pool {
|
||||
|
// return &redis.Pool{
|
||||
|
// MaxIdle: 3,
|
||||
|
// IdleTimeout: 240 * time.Second,
|
||||
|
// Dial: func () (redis.Conn, error) {
|
||||
|
// c, err := redis.Dial("tcp", server)
|
||||
|
// if err != nil {
|
||||
|
// return nil, err
|
||||
|
// }
|
||||
|
// if _, err := c.Do("AUTH", password); err != nil {
|
||||
|
// c.Close()
|
||||
|
// return nil, err
|
||||
|
// }
|
||||
|
// return c, err
|
||||
|
// },
|
||||
|
// TestOnBorrow: func(c redis.Conn, t time.Time) error {
|
||||
|
// if time.Since(t) < time.Minute {
|
||||
|
// return nil
|
||||
|
// }
|
||||
|
// _, err := c.Do("PING")
|
||||
|
// return err
|
||||
|
// },
|
||||
|
// }
|
||||
|
// }
|
||||
|
//
|
||||
|
// var (
|
||||
|
// pool *redis.Pool
|
||||
|
// redisServer = flag.String("redisServer", ":6379", "")
|
||||
|
// redisPassword = flag.String("redisPassword", "", "")
|
||||
|
// )
|
||||
|
//
|
||||
|
// func main() {
|
||||
|
// flag.Parse()
|
||||
|
// pool = newPool(*redisServer, *redisPassword)
|
||||
|
// ...
|
||||
|
// }
|
||||
|
//
|
||||
|
// A request handler gets a connection from the pool and closes the connection
|
||||
|
// when the handler is done:
|
||||
|
//
|
||||
|
// func serveHome(w http.ResponseWriter, r *http.Request) {
|
||||
|
// conn := pool.Get()
|
||||
|
// defer conn.Close()
|
||||
|
// ....
|
||||
|
// }
|
||||
|
//
|
||||
|
type Pool struct {

	// Dial is an application supplied function for creating and configuring a
	// connection.
	//
	// The connection returned from Dial must not be in a special state
	// (subscribed to pubsub channel, transaction started, ...).
	Dial func() (Conn, error)

	// TestOnBorrow is an optional application supplied function for checking
	// the health of an idle connection before the connection is used again by
	// the application. Argument t is the time that the connection was returned
	// to the pool. If the function returns an error, then the connection is
	// closed.
	TestOnBorrow func(c Conn, t time.Time) error

	// Maximum number of idle connections in the pool.
	MaxIdle int

	// Maximum number of connections allocated by the pool at a given time.
	// When zero, there is no limit on the number of connections in the pool.
	MaxActive int

	// Close connections after remaining idle for this duration. If the value
	// is zero, then idle connections are not closed. Applications should set
	// the timeout to a value less than the server's timeout.
	IdleTimeout time.Duration

	// If Wait is true and the pool is at the MaxActive limit, then Get() waits
	// for a connection to be returned to the pool before returning.
	Wait bool

	// mu protects fields defined below.
	mu     sync.Mutex
	cond   *sync.Cond // created lazily by get; signaled when a slot frees up
	closed bool       // set by Close; no further connections may be dialed
	active int        // connections currently open or checked out

	// Stack of idleConn with most recently used at the front.
	idle list.List
}
||||
|
|
||||
|
// idleConn pairs a pooled connection with the time it was returned to the
// pool; the timestamp drives IdleTimeout pruning and TestOnBorrow.
type idleConn struct {
	c Conn
	t time.Time
}
||||
|
|
||||
|
// NewPool creates a new pool.
//
// Deprecated: Initialize the Pool directly as shown in the example.
func NewPool(newFn func() (Conn, error), maxIdle int) *Pool {
	return &Pool{Dial: newFn, MaxIdle: maxIdle}
}
||||
|
|
||||
|
// Get gets a connection. The application must close the returned connection.
|
||||
|
// This method always returns a valid connection so that applications can defer
|
||||
|
// error handling to the first use of the connection. If there is an error
|
||||
|
// getting an underlying connection, then the connection Err, Do, Send, Flush
|
||||
|
// and Receive methods return that error.
|
||||
|
func (p *Pool) Get() Conn { |
||||
|
c, err := p.get() |
||||
|
if err != nil { |
||||
|
return errorConnection{err} |
||||
|
} |
||||
|
return &pooledConnection{p: p, c: c} |
||||
|
} |
||||
|
|
||||
|
// ActiveCount returns the number of active connections in the pool.
|
||||
|
func (p *Pool) ActiveCount() int { |
||||
|
p.mu.Lock() |
||||
|
active := p.active |
||||
|
p.mu.Unlock() |
||||
|
return active |
||||
|
} |
||||
|
|
||||
|
// Close releases the resources used by the pool.
|
||||
|
// Close releases the resources used by the pool: it marks the pool closed,
// wakes any goroutines blocked in get, and closes every idle connection.
func (p *Pool) Close() error {
	p.mu.Lock()
	// Take a value copy of the idle list header and reset the original so
	// the connections can be closed below without holding the mutex.
	// NOTE(review): copying a container/list.List by value is subtle — the
	// iteration below relies on the copied header still reaching the
	// detached elements.
	idle := p.idle
	p.idle.Init()
	p.closed = true
	p.active -= idle.Len()
	if p.cond != nil {
		// Wake all waiters in get() so they observe p.closed and fail fast.
		p.cond.Broadcast()
	}
	p.mu.Unlock()
	for e := idle.Front(); e != nil; e = e.Next() {
		e.Value.(idleConn).c.Close()
	}
	return nil
}
||||
|
|
||||
|
// release decrements the active count and signals waiters. The caller must
|
||||
|
// hold p.mu during the call.
|
||||
|
func (p *Pool) release() { |
||||
|
p.active -= 1 |
||||
|
if p.cond != nil { |
||||
|
p.cond.Signal() |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
// get prunes stale connections and returns a connection from the idle list or
|
||||
|
// creates a new connection.
|
||||
|
// get prunes stale connections and returns a connection from the idle list or
// creates a new connection. When Wait is set and the pool is at MaxActive,
// it blocks until a connection is released.
func (p *Pool) get() (Conn, error) {
	p.mu.Lock()

	// Prune stale connections from the back (least recently used end) of
	// the idle list. The mutex is dropped around Close because closing may
	// block on network I/O.

	if timeout := p.IdleTimeout; timeout > 0 {
		for i, n := 0, p.idle.Len(); i < n; i++ {
			e := p.idle.Back()
			if e == nil {
				break
			}
			ic := e.Value.(idleConn)
			if ic.t.Add(timeout).After(nowFunc()) {
				// The list is ordered by recency, so the rest are fresh.
				break
			}
			p.idle.Remove(e)
			p.release()
			p.mu.Unlock()
			ic.c.Close()
			p.mu.Lock()
		}
	}

	for {

		// Get idle connection. The loop bound n is captured up front,
		// presumably to bound iteration while other goroutines mutate the
		// list during the unlocked TestOnBorrow call.

		for i, n := 0, p.idle.Len(); i < n; i++ {
			e := p.idle.Front()
			if e == nil {
				break
			}
			ic := e.Value.(idleConn)
			p.idle.Remove(e)
			test := p.TestOnBorrow
			p.mu.Unlock()
			if test == nil || test(ic.c, ic.t) == nil {
				return ic.c, nil
			}
			// Health check failed: discard the connection and account for it.
			ic.c.Close()
			p.mu.Lock()
			p.release()
		}

		// Check for pool closed before dialing a new connection.

		if p.closed {
			p.mu.Unlock()
			return nil, errors.New("redigo: get on closed pool")
		}

		// Dial new connection if under limit.

		if p.MaxActive == 0 || p.active < p.MaxActive {
			dial := p.Dial
			p.active += 1
			p.mu.Unlock()
			c, err := dial()
			if err != nil {
				// Undo the optimistic active increment on dial failure.
				p.mu.Lock()
				p.release()
				p.mu.Unlock()
				c = nil
			}
			return c, err
		}

		if !p.Wait {
			p.mu.Unlock()
			return nil, ErrPoolExhausted
		}

		// At the MaxActive limit with Wait set: block until release/put
		// signals that a slot freed up, then retry from the top.
		if p.cond == nil {
			p.cond = sync.NewCond(&p.mu)
		}
		p.cond.Wait()
	}
}
||||
|
|
||||
|
// put returns c to the pool. The connection is closed instead of pooled when
// forceClose is set, when the connection carries an error, when the pool is
// closed, or when pooling it would exceed MaxIdle.
func (p *Pool) put(c Conn, forceClose bool) error {
	err := c.Err()
	p.mu.Lock()
	if !p.closed && err == nil && !forceClose {
		p.idle.PushFront(idleConn{t: nowFunc(), c: c})
		if p.idle.Len() > p.MaxIdle {
			// Evict the least recently used connection; it is closed below.
			c = p.idle.Remove(p.idle.Back()).(idleConn).c
		} else {
			c = nil
		}
	}

	// c == nil means the connection was pooled: wake one waiter and return.
	if c == nil {
		if p.cond != nil {
			p.cond.Signal()
		}
		p.mu.Unlock()
		return nil
	}

	p.release()
	p.mu.Unlock()
	return c.Close()
}
||||
|
|
||||
|
// pooledConnection is the Conn implementation returned by Pool.Get. It
// tracks connection state so Close can restore the connection to a neutral
// state before returning it to the pool.
type pooledConnection struct {
	p     *Pool
	c     Conn
	state int // bitmask of internal.MultiState / WatchState / SubscribeState
}
||||
|
|
||||
|
// sentinel is a random value echoed through a subscribed connection to
// detect the end of its message stream (see pooledConnection.Close).
var (
	sentinel     []byte
	sentinelOnce sync.Once
)

// initSentinel fills sentinel with 64 bytes from crypto/rand, falling back
// to a SHA-1 digest of the current time if the random source fails.
func initSentinel() {
	b := make([]byte, 64)
	if _, err := rand.Read(b); err == nil {
		sentinel = b
		return
	}
	// crypto/rand failed; derive a best-effort unique value from the clock.
	h := sha1.New()
	io.WriteString(h, "Oops, rand failed. Use time instead.")
	io.WriteString(h, strconv.FormatInt(time.Now().UnixNano(), 10))
	sentinel = h.Sum(nil)
}
||||
|
|
||||
|
// Close returns the underlying connection to the pool rather than closing
// it. It first tries to clear any special state (open MULTI, active WATCH,
// or pubsub subscriptions); if any state remains, put is told to force-close
// the connection instead of pooling it.
func (pc *pooledConnection) Close() error {
	c := pc.c
	if _, ok := c.(errorConnection); ok {
		// Already closed (or never usable): nothing to return to the pool.
		return nil
	}
	// Make further method calls on pc fail with errConnClosed.
	pc.c = errorConnection{errConnClosed}

	if pc.state&internal.MultiState != 0 {
		// DISCARD aborts the transaction; the code treats it as clearing
		// any WATCH as well.
		c.Send("DISCARD")
		pc.state &^= (internal.MultiState | internal.WatchState)
	} else if pc.state&internal.WatchState != 0 {
		c.Send("UNWATCH")
		pc.state &^= internal.WatchState
	}
	if pc.state&internal.SubscribeState != 0 {
		c.Send("UNSUBSCRIBE")
		c.Send("PUNSUBSCRIBE")
		// To detect the end of the message stream, ask the server to echo
		// a sentinel value and read until we see that value.
		sentinelOnce.Do(initSentinel)
		c.Send("ECHO", sentinel)
		c.Flush()
		for {
			p, err := c.Receive()
			if err != nil {
				break
			}
			if p, ok := p.([]byte); ok && bytes.Equal(p, sentinel) {
				pc.state &^= internal.SubscribeState
				break
			}
		}
	}
	// An empty command flushes the output buffer and receives any pending
	// replies before the connection is handed back.
	c.Do("")
	pc.p.put(c, pc.state != 0)
	return nil
}
||||
|
|
||||
|
// Err delegates to the underlying connection's Err method.
func (pc *pooledConnection) Err() error {
	return pc.c.Err()
}
||||
|
|
||||
|
func (pc *pooledConnection) Do(commandName string, args ...interface{}) (reply interface{}, err error) { |
||||
|
ci := internal.LookupCommandInfo(commandName) |
||||
|
pc.state = (pc.state | ci.Set) &^ ci.Clear |
||||
|
return pc.c.Do(commandName, args...) |
||||
|
} |
||||
|
|
||||
|
func (pc *pooledConnection) Send(commandName string, args ...interface{}) error { |
||||
|
ci := internal.LookupCommandInfo(commandName) |
||||
|
pc.state = (pc.state | ci.Set) &^ ci.Clear |
||||
|
return pc.c.Send(commandName, args...) |
||||
|
} |
||||
|
|
||||
|
// Flush delegates to the underlying connection's Flush method.
func (pc *pooledConnection) Flush() error {
	return pc.c.Flush()
}
||||
|
|
||||
|
// Receive delegates to the underlying connection's Receive method.
func (pc *pooledConnection) Receive() (reply interface{}, err error) {
	return pc.c.Receive()
}
||||
|
|
||||
|
// errorConnection is a Conn stub whose every method reports a fixed error.
// Pool.Get returns one when a real connection cannot be obtained, and
// pooledConnection.Close installs one so later use fails with errConnClosed.
type errorConnection struct{ err error }

func (ec errorConnection) Do(string, ...interface{}) (interface{}, error) { return nil, ec.err }
func (ec errorConnection) Send(string, ...interface{}) error              { return ec.err }
func (ec errorConnection) Err() error                                     { return ec.err }
func (ec errorConnection) Close() error                                   { return ec.err }
func (ec errorConnection) Flush() error                                   { return ec.err }
func (ec errorConnection) Receive() (interface{}, error)                  { return nil, ec.err }
@ -0,0 +1,684 @@ |
|||||
|
// Copyright 2011 Gary Burd
|
||||
|
//
|
||||
|
// Licensed under the Apache License, Version 2.0 (the "License"): you may
|
||||
|
// not use this file except in compliance with the License. You may obtain
|
||||
|
// a copy of the License at
|
||||
|
//
|
||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
//
|
||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||
|
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
|
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
|
// License for the specific language governing permissions and limitations
|
||||
|
// under the License.
|
||||
|
|
||||
|
package redis_test |
||||
|
|
||||
|
import ( |
||||
|
"errors" |
||||
|
"io" |
||||
|
"reflect" |
||||
|
"sync" |
||||
|
"testing" |
||||
|
"time" |
||||
|
|
||||
|
"github.com/garyburd/redigo/redis" |
||||
|
) |
||||
|
|
||||
|
// poolTestConn wraps a real connection, mirroring bookkeeping (open count,
// command history, a sticky error) into its poolDialer for test assertions.
type poolTestConn struct {
	d   *poolDialer
	err error
	redis.Conn
}
||||
|
|
||||
|
func (c *poolTestConn) Close() error { |
||||
|
c.d.mu.Lock() |
||||
|
c.d.open -= 1 |
||||
|
c.d.mu.Unlock() |
||||
|
return c.Conn.Close() |
||||
|
} |
||||
|
|
||||
|
// Err reports the sticky error installed by the fake "ERR" command.
func (c *poolTestConn) Err() error { return c.err }
||||
|
|
||||
|
// Do records the command in the dialer's history and forwards it. The fake
// "ERR" command stores its argument as the connection's sticky error and is
// rewritten to PING before being sent to the server.
func (c *poolTestConn) Do(commandName string, args ...interface{}) (interface{}, error) {
	if commandName == "ERR" {
		c.err = args[0].(error)
		commandName = "PING"
	}
	if commandName != "" {
		c.d.commands = append(c.d.commands, commandName)
	}
	return c.Conn.Do(commandName, args...)
}
||||
|
|
||||
|
// Send records the command in the dialer's history before forwarding it.
func (c *poolTestConn) Send(commandName string, args ...interface{}) error {
	c.d.commands = append(c.d.commands, commandName)
	return c.Conn.Send(commandName, args...)
}
||||
|
|
||||
|
// poolDialer supplies Pool.Dial for tests and records dial attempts, open
// connections, and the commands issued, so tests can assert pool behavior.
type poolDialer struct {
	mu       sync.Mutex // guards dialed, open, and dialErr
	t        *testing.T
	dialed   int      // total dial attempts
	open     int      // currently open connections
	commands []string // commands observed by poolTestConn.Do/Send
	dialErr  error    // when set, dial fails with this error
}
||||
|
|
||||
|
func (d *poolDialer) dial() (redis.Conn, error) { |
||||
|
d.mu.Lock() |
||||
|
d.dialed += 1 |
||||
|
dialErr := d.dialErr |
||||
|
d.mu.Unlock() |
||||
|
if dialErr != nil { |
||||
|
return nil, d.dialErr |
||||
|
} |
||||
|
c, err := redis.DialDefaultServer() |
||||
|
if err != nil { |
||||
|
return nil, err |
||||
|
} |
||||
|
d.mu.Lock() |
||||
|
d.open += 1 |
||||
|
d.mu.Unlock() |
||||
|
return &poolTestConn{d: d, Conn: c}, nil |
||||
|
} |
||||
|
|
||||
|
// check asserts the dialer's dial and open counters and the pool's reported
// active count; message prefixes any failure output.
func (d *poolDialer) check(message string, p *redis.Pool, dialed, open int) {
	d.mu.Lock()
	if d.dialed != dialed {
		d.t.Errorf("%s: dialed=%d, want %d", message, d.dialed, dialed)
	}
	if d.open != open {
		d.t.Errorf("%s: open=%d, want %d", message, d.open, open)
	}
	if active := p.ActiveCount(); active != open {
		d.t.Errorf("%s: active=%d, want %d", message, active, open)
	}
	d.mu.Unlock()
}
||||
|
|
||||
|
// TestPoolReuse verifies that repeated interleaved Get/Close cycles reuse
// the same two connections (only two dials across ten iterations).
func TestPoolReuse(t *testing.T) {
	d := poolDialer{t: t}
	p := &redis.Pool{
		MaxIdle: 2,
		Dial:    d.dial,
	}

	for i := 0; i < 10; i++ {
		c1 := p.Get()
		c1.Do("PING")
		c2 := p.Get()
		c2.Do("PING")
		c1.Close()
		c2.Close()
	}

	d.check("before close", p, 2, 2)
	p.Close()
	d.check("after close", p, 2, 0)
}
||||
|
|
||||
|
// TestPoolMaxIdle verifies that only MaxIdle connections are retained: the
// third connection of each cycle is closed, forcing a fresh dial next round.
func TestPoolMaxIdle(t *testing.T) {
	d := poolDialer{t: t}
	p := &redis.Pool{
		MaxIdle: 2,
		Dial:    d.dial,
	}
	defer p.Close()

	for i := 0; i < 10; i++ {
		c1 := p.Get()
		c1.Do("PING")
		c2 := p.Get()
		c2.Do("PING")
		c3 := p.Get()
		c3.Do("PING")
		c1.Close()
		c2.Close()
		c3.Close()
	}
	d.check("before close", p, 12, 2)
	p.Close()
	d.check("after close", p, 12, 0)
}
||||
|
|
||||
|
// TestPoolError verifies that connections carrying an error are not returned
// to the idle list when closed.
func TestPoolError(t *testing.T) {
	d := poolDialer{t: t}
	p := &redis.Pool{
		MaxIdle: 2,
		Dial:    d.dial,
	}
	defer p.Close()

	c := p.Get()
	c.Do("ERR", io.EOF)
	if c.Err() == nil {
		t.Errorf("expected c.Err() != nil")
	}
	c.Close()

	c = p.Get()
	c.Do("ERR", io.EOF)
	c.Close()

	d.check(".", p, 2, 0)
}
||||
|
|
||||
|
// TestPoolClose verifies behavior around pool shutdown: a closed pooled
// connection errors on use, double Close is harmless, and Get on a closed
// pool yields an error connection.
func TestPoolClose(t *testing.T) {
	d := poolDialer{t: t}
	p := &redis.Pool{
		MaxIdle: 2,
		Dial:    d.dial,
	}
	defer p.Close()

	c1 := p.Get()
	c1.Do("PING")
	c2 := p.Get()
	c2.Do("PING")
	c3 := p.Get()
	c3.Do("PING")

	c1.Close()
	if _, err := c1.Do("PING"); err == nil {
		t.Errorf("expected error after connection closed")
	}

	c2.Close()
	c2.Close()

	p.Close()

	// c3 is still checked out, so one connection remains open.
	d.check("after pool close", p, 3, 1)

	if _, err := c1.Do("PING"); err == nil {
		t.Errorf("expected error after connection and pool closed")
	}

	c3.Close()

	d.check("after conn close", p, 3, 0)

	c1 = p.Get()
	if _, err := c1.Do("PING"); err == nil {
		t.Errorf("expected error after pool closed")
	}
}
||||
|
|
||||
|
// TestPoolTimeout verifies that idle connections older than IdleTimeout are
// pruned, using a stubbed clock installed via redis.SetNowFunc.
func TestPoolTimeout(t *testing.T) {
	d := poolDialer{t: t}
	p := &redis.Pool{
		MaxIdle:     2,
		IdleTimeout: 300 * time.Second,
		Dial:        d.dial,
	}
	defer p.Close()

	now := time.Now()
	redis.SetNowFunc(func() time.Time { return now })
	defer redis.SetNowFunc(time.Now)

	c := p.Get()
	c.Do("PING")
	c.Close()

	d.check("1", p, 1, 1)

	// Advance the fake clock past the idle timeout; the next Get must
	// discard the stale connection and dial again.
	now = now.Add(p.IdleTimeout)

	c = p.Get()
	c.Do("PING")
	c.Close()

	d.check("2", p, 2, 1)
}
||||
|
|
||||
|
// TestPoolConcurrenSendReceive (NOTE(review): "Concurrent" is misspelled in
// the upstream name) verifies that Receive on one goroutine and Send/Flush
// on another work against the same pooled connection.
func TestPoolConcurrenSendReceive(t *testing.T) {
	p := &redis.Pool{
		Dial: redis.DialDefaultServer,
	}
	defer p.Close()

	c := p.Get()
	done := make(chan error, 1)
	go func() {
		_, err := c.Receive()
		done <- err
	}()
	c.Send("PING")
	c.Flush()
	err := <-done
	if err != nil {
		t.Fatalf("Receive() returned error %v", err)
	}
	_, err = c.Do("")
	if err != nil {
		t.Fatalf("Do() returned error %v", err)
	}
	c.Close()
}
||||
|
|
||||
|
// TestPoolBorrowCheck verifies that a failing TestOnBorrow discards idle
// connections, so each of the ten Gets dials a new connection.
func TestPoolBorrowCheck(t *testing.T) {
	d := poolDialer{t: t}
	p := &redis.Pool{
		MaxIdle:      2,
		Dial:         d.dial,
		TestOnBorrow: func(redis.Conn, time.Time) error { return redis.Error("BLAH") },
	}
	defer p.Close()

	for i := 0; i < 10; i++ {
		c := p.Get()
		c.Do("PING")
		c.Close()
	}
	d.check("1", p, 10, 1)
}
||||
|
|
||||
|
// TestPoolMaxActive verifies that Get beyond the MaxActive limit returns an
// ErrPoolExhausted connection, and that closing a connection frees a slot.
func TestPoolMaxActive(t *testing.T) {
	d := poolDialer{t: t}
	p := &redis.Pool{
		MaxIdle:   2,
		MaxActive: 2,
		Dial:      d.dial,
	}
	defer p.Close()

	c1 := p.Get()
	c1.Do("PING")
	c2 := p.Get()
	c2.Do("PING")

	d.check("1", p, 2, 2)

	c3 := p.Get()
	if _, err := c3.Do("PING"); err != redis.ErrPoolExhausted {
		t.Errorf("expected pool exhausted")
	}

	c3.Close()
	d.check("2", p, 2, 2)
	c2.Close()
	d.check("3", p, 2, 2)

	c3 = p.Get()
	if _, err := c3.Do("PING"); err != nil {
		t.Errorf("expected good channel, err=%v", err)
	}
	c3.Close()

	d.check("4", p, 2, 2)
}
||||
|
|
||||
|
// TestPoolMonitorCleanup verifies that a connection left in MONITOR mode is
// closed on return rather than being placed back on the idle list.
func TestPoolMonitorCleanup(t *testing.T) {
	d := poolDialer{t: t}
	p := &redis.Pool{
		MaxIdle:   2,
		MaxActive: 2,
		Dial:      d.dial,
	}
	defer p.Close()

	c := p.Get()
	c.Send("MONITOR")
	c.Close()

	d.check("", p, 1, 0)
}
||||
|
|
||||
|
// TestPoolPubSubCleanup verifies that Close cleans up a subscribed
// connection by issuing UNSUBSCRIBE/PUNSUBSCRIBE and the sentinel ECHO.
func TestPoolPubSubCleanup(t *testing.T) {
	d := poolDialer{t: t}
	p := &redis.Pool{
		MaxIdle:   2,
		MaxActive: 2,
		Dial:      d.dial,
	}
	defer p.Close()

	c := p.Get()
	c.Send("SUBSCRIBE", "x")
	c.Close()

	want := []string{"SUBSCRIBE", "UNSUBSCRIBE", "PUNSUBSCRIBE", "ECHO"}
	if !reflect.DeepEqual(d.commands, want) {
		t.Errorf("got commands %v, want %v", d.commands, want)
	}
	d.commands = nil

	c = p.Get()
	c.Send("PSUBSCRIBE", "x*")
	c.Close()

	want = []string{"PSUBSCRIBE", "UNSUBSCRIBE", "PUNSUBSCRIBE", "ECHO"}
	if !reflect.DeepEqual(d.commands, want) {
		t.Errorf("got commands %v, want %v", d.commands, want)
	}
	d.commands = nil
}
||||
|
|
||||
|
// TestPoolTransactionCleanup verifies that Close issues UNWATCH or DISCARD
// as needed to clear WATCH/MULTI state, and does nothing extra when the
// application already ended the transaction itself.
func TestPoolTransactionCleanup(t *testing.T) {
	d := poolDialer{t: t}
	p := &redis.Pool{
		MaxIdle:   2,
		MaxActive: 2,
		Dial:      d.dial,
	}
	defer p.Close()

	// WATCH left open: Close must append UNWATCH.
	c := p.Get()
	c.Do("WATCH", "key")
	c.Do("PING")
	c.Close()

	want := []string{"WATCH", "PING", "UNWATCH"}
	if !reflect.DeepEqual(d.commands, want) {
		t.Errorf("got commands %v, want %v", d.commands, want)
	}
	d.commands = nil

	// WATCH already cleared by the application: no cleanup expected.
	c = p.Get()
	c.Do("WATCH", "key")
	c.Do("UNWATCH")
	c.Do("PING")
	c.Close()

	want = []string{"WATCH", "UNWATCH", "PING"}
	if !reflect.DeepEqual(d.commands, want) {
		t.Errorf("got commands %v, want %v", d.commands, want)
	}
	d.commands = nil

	// MULTI left open: Close must append DISCARD.
	c = p.Get()
	c.Do("WATCH", "key")
	c.Do("MULTI")
	c.Do("PING")
	c.Close()

	want = []string{"WATCH", "MULTI", "PING", "DISCARD"}
	if !reflect.DeepEqual(d.commands, want) {
		t.Errorf("got commands %v, want %v", d.commands, want)
	}
	d.commands = nil

	// Transaction discarded by the application: no cleanup expected.
	c = p.Get()
	c.Do("WATCH", "key")
	c.Do("MULTI")
	c.Do("DISCARD")
	c.Do("PING")
	c.Close()

	want = []string{"WATCH", "MULTI", "DISCARD", "PING"}
	if !reflect.DeepEqual(d.commands, want) {
		t.Errorf("got commands %v, want %v", d.commands, want)
	}
	d.commands = nil

	// Transaction executed by the application: no cleanup expected.
	c = p.Get()
	c.Do("WATCH", "key")
	c.Do("MULTI")
	c.Do("EXEC")
	c.Do("PING")
	c.Close()

	want = []string{"WATCH", "MULTI", "EXEC", "PING"}
	if !reflect.DeepEqual(d.commands, want) {
		t.Errorf("got commands %v, want %v", d.commands, want)
	}
	d.commands = nil
}
||||
|
|
||||
|
// startGoroutines launches ten goroutines that each run cmd on a pooled
// connection, returning a channel that will carry their results.
func startGoroutines(p *redis.Pool, cmd string, args ...interface{}) chan error {
	errs := make(chan error, 10)
	for i := 0; i < cap(errs); i++ {
		go func() {
			c := p.Get()
			_, err := c.Do(cmd, args...)
			errs <- err
			c.Close()
		}()
	}

	// Wait for goroutines to block.
	// NOTE(review): sleep-based synchronization is best-effort; after a
	// quarter second the goroutines are only probably blocked in Get.
	time.Sleep(time.Second / 4)

	return errs
}
||||
|
|
||||
|
// TestWaitPool verifies that with Wait set, goroutines blocked at the
// MaxActive limit all succeed once the held connection is returned.
func TestWaitPool(t *testing.T) {
	d := poolDialer{t: t}
	p := &redis.Pool{
		MaxIdle:   1,
		MaxActive: 1,
		Dial:      d.dial,
		Wait:      true,
	}
	defer p.Close()

	c := p.Get()
	errs := startGoroutines(p, "PING")
	d.check("before close", p, 1, 1)
	c.Close()
	timeout := time.After(2 * time.Second)
	for i := 0; i < cap(errs); i++ {
		select {
		case err := <-errs:
			if err != nil {
				t.Fatal(err)
			}
		case <-timeout:
			t.Fatalf("timeout waiting for blocked goroutine %d", i)
		}
	}
	d.check("done", p, 1, 1)
}
||||
|
|
||||
|
// TestWaitPoolClose verifies that closing the pool unblocks waiting
// goroutines with an error other than ErrPoolExhausted.
func TestWaitPoolClose(t *testing.T) {
	d := poolDialer{t: t}
	p := &redis.Pool{
		MaxIdle:   1,
		MaxActive: 1,
		Dial:      d.dial,
		Wait:      true,
	}
	defer p.Close()

	c := p.Get()
	if _, err := c.Do("PING"); err != nil {
		t.Fatal(err)
	}
	errs := startGoroutines(p, "PING")
	d.check("before close", p, 1, 1)
	p.Close()
	timeout := time.After(2 * time.Second)
	for i := 0; i < cap(errs); i++ {
		select {
		case err := <-errs:
			switch err {
			case nil:
				t.Fatal("blocked goroutine did not get error")
			case redis.ErrPoolExhausted:
				t.Fatal("blocked goroutine got pool exhausted error")
			}
		case <-timeout:
			t.Fatal("timeout waiting for blocked goroutine")
		}
	}
	c.Close()
	d.check("done", p, 1, 0)
}
||||
|
|
||||
|
// TestWaitPoolCommandError verifies that a command-level error on the held
// connection still releases the slot, so each waiter gets a fresh dial.
func TestWaitPoolCommandError(t *testing.T) {
	testErr := errors.New("test")
	d := poolDialer{t: t}
	p := &redis.Pool{
		MaxIdle:   1,
		MaxActive: 1,
		Dial:      d.dial,
		Wait:      true,
	}
	defer p.Close()

	c := p.Get()
	errs := startGoroutines(p, "ERR", testErr)
	d.check("before close", p, 1, 1)
	c.Close()
	timeout := time.After(2 * time.Second)
	for i := 0; i < cap(errs); i++ {
		select {
		case err := <-errs:
			if err != nil {
				t.Fatal(err)
			}
		case <-timeout:
			t.Fatalf("timeout waiting for blocked goroutine %d", i)
		}
	}
	d.check("done", p, cap(errs), 0)
}
||||
|
|
||||
|
// TestWaitPoolDialError verifies that when Dial starts failing, exactly one
// waiter inherits the released connection (nil error) and the rest observe
// the dial error.
func TestWaitPoolDialError(t *testing.T) {
	testErr := errors.New("test")
	d := poolDialer{t: t}
	p := &redis.Pool{
		MaxIdle:   1,
		MaxActive: 1,
		Dial:      d.dial,
		Wait:      true,
	}
	defer p.Close()

	c := p.Get()
	errs := startGoroutines(p, "ERR", testErr)
	d.check("before close", p, 1, 1)

	d.dialErr = errors.New("dial")
	c.Close()

	nilCount := 0
	errCount := 0
	timeout := time.After(2 * time.Second)
	for i := 0; i < cap(errs); i++ {
		select {
		case err := <-errs:
			switch err {
			case nil:
				nilCount++
			case d.dialErr:
				errCount++
			default:
				t.Fatalf("expected dial error or nil, got %v", err)
			}
		case <-timeout:
			t.Fatalf("timeout waiting for blocked goroutine %d", i)
		}
	}
	if nilCount != 1 {
		t.Errorf("expected one nil error, got %d", nilCount)
	}
	if errCount != cap(errs)-1 {
		t.Errorf("expected %d dial errors, got %d", cap(errs)-1, errCount)
	}
	d.check("done", p, cap(errs), 0)
}
||||
|
|
||||
|
// Borrowing requires us to iterate over the idle connections, unlock the pool,
|
||||
|
// and perform a blocking operation to check the connection still works. If
|
||||
|
// TestOnBorrow fails, we must reacquire the lock and continue iteration. This
|
||||
|
// test ensures that iteration will work correctly if multiple threads are
|
||||
|
// iterating simultaneously.
|
||||
|
// TestLocking_TestOnBorrowFails_PoolDoesntCrash thrashes a pool whose
// TestOnBorrow always fails, exercising the lock/unlock cycle in get's idle
// iteration across many concurrent goroutines.
func TestLocking_TestOnBorrowFails_PoolDoesntCrash(t *testing.T) {
	const count = 100

	// First we'll Create a pool where the pilfering of idle connections fails.
	d := poolDialer{t: t}
	p := &redis.Pool{
		MaxIdle:   count,
		MaxActive: count,
		Dial:      d.dial,
		TestOnBorrow: func(c redis.Conn, t time.Time) error {
			return errors.New("No way back into the real world.")
		},
	}
	defer p.Close()

	// Fill the pool with idle connections.
	conns := make([]redis.Conn, count)
	for i := range conns {
		conns[i] = p.Get()
	}
	for i := range conns {
		conns[i].Close()
	}

	// Spawn a bunch of goroutines to thrash the pool.
	var wg sync.WaitGroup
	wg.Add(count)
	for i := 0; i < count; i++ {
		go func() {
			c := p.Get()
			if c.Err() != nil {
				t.Errorf("pool get failed: %v", c.Err())
			}
			c.Close()
			wg.Done()
		}()
	}
	wg.Wait()
	// Every idle connection was rejected by TestOnBorrow, so each of the
	// count goroutines dialed again: count initial + count retry dials.
	if d.dialed != count*2 {
		t.Errorf("Expected %d dials, got %d", count*2, d.dialed)
	}
}
||||
|
|
||||
|
// BenchmarkPoolGet measures the cost of a bare Get/Close cycle on a warm pool.
func BenchmarkPoolGet(b *testing.B) {
	b.StopTimer()
	p := redis.Pool{Dial: redis.DialDefaultServer, MaxIdle: 2}
	c := p.Get()
	if err := c.Err(); err != nil {
		b.Fatal(err)
	}
	c.Close()
	defer p.Close()
	b.StartTimer()
	for i := 0; i < b.N; i++ {
		c = p.Get()
		c.Close()
	}
}
||||
|
|
||||
|
// BenchmarkPoolGetErr measures Get/Err/Close, adding an error check per cycle.
func BenchmarkPoolGetErr(b *testing.B) {
	b.StopTimer()
	p := redis.Pool{Dial: redis.DialDefaultServer, MaxIdle: 2}
	c := p.Get()
	if err := c.Err(); err != nil {
		b.Fatal(err)
	}
	c.Close()
	defer p.Close()
	b.StartTimer()
	for i := 0; i < b.N; i++ {
		c = p.Get()
		if err := c.Err(); err != nil {
			b.Fatal(err)
		}
		c.Close()
	}
}
||||
|
|
||||
|
// BenchmarkPoolGetPing measures a Get/PING/Close cycle, including one server
// round trip per iteration.
func BenchmarkPoolGetPing(b *testing.B) {
	b.StopTimer()
	p := redis.Pool{Dial: redis.DialDefaultServer, MaxIdle: 2}
	c := p.Get()
	if err := c.Err(); err != nil {
		b.Fatal(err)
	}
	c.Close()
	defer p.Close()
	b.StartTimer()
	for i := 0; i < b.N; i++ {
		c = p.Get()
		if _, err := c.Do("PING"); err != nil {
			b.Fatal(err)
		}
		c.Close()
	}
}
@ -0,0 +1,144 @@ |
|||||
|
// Copyright 2012 Gary Burd
|
||||
|
//
|
||||
|
// Licensed under the Apache License, Version 2.0 (the "License"): you may
|
||||
|
// not use this file except in compliance with the License. You may obtain
|
||||
|
// a copy of the License at
|
||||
|
//
|
||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
//
|
||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||
|
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
|
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
|
// License for the specific language governing permissions and limitations
|
||||
|
// under the License.
|
||||
|
|
||||
|
package redis |
||||
|
|
||||
|
import "errors" |
||||
|
|
||||
|
// Subscription represents a subscribe or unsubscribe notification pushed by
// the server in response to a (P)SUBSCRIBE or (P)UNSUBSCRIBE command.
type Subscription struct {

	// Kind is "subscribe", "unsubscribe", "psubscribe" or "punsubscribe".
	Kind string

	// Channel is the channel (or pattern) whose subscription state changed.
	Channel string

	// Count is the current number of subscriptions held by the connection.
	Count int
}
||||
|
|
||||
|
// Message represents a message notification received for a channel the
// connection is subscribed to.
type Message struct {

	// Channel is the originating channel.
	Channel string

	// Data is the message payload.
	Data []byte
}
||||
|
|
||||
|
// PMessage represents a pmessage notification received for a pattern the
// connection is subscribed to with PSUBSCRIBE.
type PMessage struct {

	// Pattern is the subscription pattern that matched.
	Pattern string

	// Channel is the originating channel.
	Channel string

	// Data is the message payload.
	Data []byte
}
||||
|
|
||||
|
// Pong represents a pubsub pong notification sent in reply to a PING issued
// on a subscribed connection.
type Pong struct {
	// Data echoes the optional argument passed to PING ("" for a bare PING).
	Data string
}
||||
|
|
||||
|
// PubSubConn wraps a Conn with convenience methods for subscribers:
// subscription management, pings and typed receipt of pushed notifications.
type PubSubConn struct {
	// Conn is the underlying connection; it is used directly for Send/Flush.
	Conn Conn
}
||||
|
|
||||
|
// Close closes the underlying connection.
func (c PubSubConn) Close() error {
	return c.Conn.Close()
}
||||
|
|
||||
|
// Subscribe subscribes the connection to the specified channels.
|
||||
|
func (c PubSubConn) Subscribe(channel ...interface{}) error { |
||||
|
c.Conn.Send("SUBSCRIBE", channel...) |
||||
|
return c.Conn.Flush() |
||||
|
} |
||||
|
|
||||
|
// PSubscribe subscribes the connection to the given patterns.
|
||||
|
func (c PubSubConn) PSubscribe(channel ...interface{}) error { |
||||
|
c.Conn.Send("PSUBSCRIBE", channel...) |
||||
|
return c.Conn.Flush() |
||||
|
} |
||||
|
|
||||
|
// Unsubscribe unsubscribes the connection from the given channels, or from all
|
||||
|
// of them if none is given.
|
||||
|
func (c PubSubConn) Unsubscribe(channel ...interface{}) error { |
||||
|
c.Conn.Send("UNSUBSCRIBE", channel...) |
||||
|
return c.Conn.Flush() |
||||
|
} |
||||
|
|
||||
|
// PUnsubscribe unsubscribes the connection from the given patterns, or from all
|
||||
|
// of them if none is given.
|
||||
|
func (c PubSubConn) PUnsubscribe(channel ...interface{}) error { |
||||
|
c.Conn.Send("PUNSUBSCRIBE", channel...) |
||||
|
return c.Conn.Flush() |
||||
|
} |
||||
|
|
||||
|
// Ping sends a PING to the server with the specified data.
|
||||
|
func (c PubSubConn) Ping(data string) error { |
||||
|
c.Conn.Send("PING", data) |
||||
|
return c.Conn.Flush() |
||||
|
} |
||||
|
|
||||
|
// Receive returns a pushed message as a Subscription, Message, PMessage, Pong
|
||||
|
// or error. The return value is intended to be used directly in a type switch
|
||||
|
// as illustrated in the PubSubConn example.
|
||||
|
func (c PubSubConn) Receive() interface{} { |
||||
|
reply, err := Values(c.Conn.Receive()) |
||||
|
if err != nil { |
||||
|
return err |
||||
|
} |
||||
|
|
||||
|
var kind string |
||||
|
reply, err = Scan(reply, &kind) |
||||
|
if err != nil { |
||||
|
return err |
||||
|
} |
||||
|
|
||||
|
switch kind { |
||||
|
case "message": |
||||
|
var m Message |
||||
|
if _, err := Scan(reply, &m.Channel, &m.Data); err != nil { |
||||
|
return err |
||||
|
} |
||||
|
return m |
||||
|
case "pmessage": |
||||
|
var pm PMessage |
||||
|
if _, err := Scan(reply, &pm.Pattern, &pm.Channel, &pm.Data); err != nil { |
||||
|
return err |
||||
|
} |
||||
|
return pm |
||||
|
case "subscribe", "psubscribe", "unsubscribe", "punsubscribe": |
||||
|
s := Subscription{Kind: kind} |
||||
|
if _, err := Scan(reply, &s.Channel, &s.Count); err != nil { |
||||
|
return err |
||||
|
} |
||||
|
return s |
||||
|
case "pong": |
||||
|
var p Pong |
||||
|
if _, err := Scan(reply, &p.Data); err != nil { |
||||
|
return err |
||||
|
} |
||||
|
return p |
||||
|
} |
||||
|
return errors.New("redigo: unknown pubsub notification") |
||||
|
} |
@ -0,0 +1,148 @@ |
|||||
|
// Copyright 2012 Gary Burd
|
||||
|
//
|
||||
|
// Licensed under the Apache License, Version 2.0 (the "License"): you may
|
||||
|
// not use this file except in compliance with the License. You may obtain
|
||||
|
// a copy of the License at
|
||||
|
//
|
||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
//
|
||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||
|
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
|
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
|
// License for the specific language governing permissions and limitations
|
||||
|
// under the License.
|
||||
|
|
||||
|
package redis_test |
||||
|
|
||||
|
import ( |
||||
|
"fmt" |
||||
|
"reflect" |
||||
|
"sync" |
||||
|
"testing" |
||||
|
|
||||
|
"github.com/garyburd/redigo/redis" |
||||
|
) |
||||
|
|
||||
|
func publish(channel, value interface{}) { |
||||
|
c, err := dial() |
||||
|
if err != nil { |
||||
|
fmt.Println(err) |
||||
|
return |
||||
|
} |
||||
|
defer c.Close() |
||||
|
c.Do("PUBLISH", channel, value) |
||||
|
} |
||||
|
|
||||
|
// Applications can receive pushed messages from one goroutine and manage subscriptions from another goroutine.
func ExamplePubSubConn() {
	c, err := dial()
	if err != nil {
		fmt.Println(err)
		return
	}
	defer c.Close()
	var wg sync.WaitGroup
	wg.Add(2)

	psc := redis.PubSubConn{Conn: c}

	// This goroutine receives and prints pushed notifications from the server.
	// The goroutine exits when the connection is unsubscribed from all
	// channels or there is an error.
	go func() {
		defer wg.Done()
		for {
			switch n := psc.Receive().(type) {
			case redis.Message:
				fmt.Printf("Message: %s %s\n", n.Channel, n.Data)
			case redis.PMessage:
				fmt.Printf("PMessage: %s %s %s\n", n.Pattern, n.Channel, n.Data)
			case redis.Subscription:
				fmt.Printf("Subscription: %s %s %d\n", n.Kind, n.Channel, n.Count)
				// Count reaches zero once every channel and pattern has
				// been unsubscribed; stop receiving at that point.
				if n.Count == 0 {
					return
				}
			case error:
				fmt.Printf("error: %v\n", n)
				return
			}
		}
	}()

	// This goroutine manages subscriptions for the connection.
	go func() {
		defer wg.Done()

		psc.Subscribe("example")
		psc.PSubscribe("p*")

		// The following function calls publish a message using another
		// connection to the Redis server.
		publish("example", "hello")
		publish("example", "world")
		publish("pexample", "foo")
		publish("pexample", "bar")

		// Unsubscribe from all channels and patterns. This will cause the
		// receiving goroutine to exit.
		psc.Unsubscribe()
		psc.PUnsubscribe()
	}()

	wg.Wait()

	// Output:
	// Subscription: subscribe example 1
	// Subscription: psubscribe p* 2
	// Message: example hello
	// Message: example world
	// PMessage: p* pexample foo
	// PMessage: p* pexample bar
	// Subscription: unsubscribe example 1
	// Subscription: punsubscribe p* 0
}
||||
|
|
||||
|
func expectPushed(t *testing.T, c redis.PubSubConn, message string, expected interface{}) { |
||||
|
actual := c.Receive() |
||||
|
if !reflect.DeepEqual(actual, expected) { |
||||
|
t.Errorf("%s = %v, want %v", message, actual, expected) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func TestPushed(t *testing.T) { |
||||
|
pc, err := redis.DialDefaultServer() |
||||
|
if err != nil { |
||||
|
t.Fatalf("error connection to database, %v", err) |
||||
|
} |
||||
|
defer pc.Close() |
||||
|
|
||||
|
sc, err := redis.DialDefaultServer() |
||||
|
if err != nil { |
||||
|
t.Fatalf("error connection to database, %v", err) |
||||
|
} |
||||
|
defer sc.Close() |
||||
|
|
||||
|
c := redis.PubSubConn{Conn: sc} |
||||
|
|
||||
|
c.Subscribe("c1") |
||||
|
expectPushed(t, c, "Subscribe(c1)", redis.Subscription{Kind: "subscribe", Channel: "c1", Count: 1}) |
||||
|
c.Subscribe("c2") |
||||
|
expectPushed(t, c, "Subscribe(c2)", redis.Subscription{Kind: "subscribe", Channel: "c2", Count: 2}) |
||||
|
c.PSubscribe("p1") |
||||
|
expectPushed(t, c, "PSubscribe(p1)", redis.Subscription{Kind: "psubscribe", Channel: "p1", Count: 3}) |
||||
|
c.PSubscribe("p2") |
||||
|
expectPushed(t, c, "PSubscribe(p2)", redis.Subscription{Kind: "psubscribe", Channel: "p2", Count: 4}) |
||||
|
c.PUnsubscribe() |
||||
|
expectPushed(t, c, "Punsubscribe(p1)", redis.Subscription{Kind: "punsubscribe", Channel: "p1", Count: 3}) |
||||
|
expectPushed(t, c, "Punsubscribe()", redis.Subscription{Kind: "punsubscribe", Channel: "p2", Count: 2}) |
||||
|
|
||||
|
pc.Do("PUBLISH", "c1", "hello") |
||||
|
expectPushed(t, c, "PUBLISH c1 hello", redis.Message{Channel: "c1", Data: []byte("hello")}) |
||||
|
|
||||
|
c.Ping("hello") |
||||
|
expectPushed(t, c, `Ping("hello")`, redis.Pong{Data: "hello"}) |
||||
|
|
||||
|
c.Conn.Send("PING") |
||||
|
c.Conn.Flush() |
||||
|
expectPushed(t, c, `Send("PING")`, redis.Pong{}) |
||||
|
} |
@ -0,0 +1,44 @@ |
|||||
|
// Copyright 2012 Gary Burd
|
||||
|
//
|
||||
|
// Licensed under the Apache License, Version 2.0 (the "License"): you may
|
||||
|
// not use this file except in compliance with the License. You may obtain
|
||||
|
// a copy of the License at
|
||||
|
//
|
||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
//
|
||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||
|
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
|
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
|
// License for the specific language governing permissions and limitations
|
||||
|
// under the License.
|
||||
|
|
||||
|
package redis |
||||
|
|
||||
|
// Error represents an error reply returned by the Redis server for a
// command. The string is the raw server message.
type Error string

// Error implements the error interface.
func (e Error) Error() string { return string(e) }
||||
|
|
||||
|
// Conn represents a connection to a Redis server.
type Conn interface {
	// Close closes the connection.
	Close() error

	// Err returns a non-nil value if the connection is broken. The returned
	// value is either the first non-nil value returned from the underlying
	// network connection or a protocol parsing error. Applications should
	// close broken connections.
	Err() error

	// Do sends a command to the server and returns the received reply.
	Do(commandName string, args ...interface{}) (reply interface{}, err error)

	// Send writes the command to the client's output buffer without
	// flushing it to the server.
	Send(commandName string, args ...interface{}) error

	// Flush flushes the output buffer to the Redis server.
	Flush() error

	// Receive receives a single reply from the Redis server.
	Receive() (reply interface{}, err error)
}
@ -0,0 +1,393 @@ |
|||||
|
// Copyright 2012 Gary Burd
|
||||
|
//
|
||||
|
// Licensed under the Apache License, Version 2.0 (the "License"): you may
|
||||
|
// not use this file except in compliance with the License. You may obtain
|
||||
|
// a copy of the License at
|
||||
|
//
|
||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
//
|
||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||
|
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
|
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
|
// License for the specific language governing permissions and limitations
|
||||
|
// under the License.
|
||||
|
|
||||
|
package redis |
||||
|
|
||||
|
import ( |
||||
|
"errors" |
||||
|
"fmt" |
||||
|
"strconv" |
||||
|
) |
||||
|
|
||||
|
// ErrNil indicates that a reply value is nil (the Redis "no value" reply).
var ErrNil = errors.New("redigo: nil returned")
||||
|
|
||||
|
// Int is a helper that converts a command reply to an integer. If err is not
|
||||
|
// equal to nil, then Int returns 0, err. Otherwise, Int converts the
|
||||
|
// reply to an int as follows:
|
||||
|
//
|
||||
|
// Reply type Result
|
||||
|
// integer int(reply), nil
|
||||
|
// bulk string parsed reply, nil
|
||||
|
// nil 0, ErrNil
|
||||
|
// other 0, error
|
||||
|
func Int(reply interface{}, err error) (int, error) { |
||||
|
if err != nil { |
||||
|
return 0, err |
||||
|
} |
||||
|
switch reply := reply.(type) { |
||||
|
case int64: |
||||
|
x := int(reply) |
||||
|
if int64(x) != reply { |
||||
|
return 0, strconv.ErrRange |
||||
|
} |
||||
|
return x, nil |
||||
|
case []byte: |
||||
|
n, err := strconv.ParseInt(string(reply), 10, 0) |
||||
|
return int(n), err |
||||
|
case nil: |
||||
|
return 0, ErrNil |
||||
|
case Error: |
||||
|
return 0, reply |
||||
|
} |
||||
|
return 0, fmt.Errorf("redigo: unexpected type for Int, got type %T", reply) |
||||
|
} |
||||
|
|
||||
|
// Int64 is a helper that converts a command reply to 64 bit integer. If err is
|
||||
|
// not equal to nil, then Int returns 0, err. Otherwise, Int64 converts the
|
||||
|
// reply to an int64 as follows:
|
||||
|
//
|
||||
|
// Reply type Result
|
||||
|
// integer reply, nil
|
||||
|
// bulk string parsed reply, nil
|
||||
|
// nil 0, ErrNil
|
||||
|
// other 0, error
|
||||
|
func Int64(reply interface{}, err error) (int64, error) { |
||||
|
if err != nil { |
||||
|
return 0, err |
||||
|
} |
||||
|
switch reply := reply.(type) { |
||||
|
case int64: |
||||
|
return reply, nil |
||||
|
case []byte: |
||||
|
n, err := strconv.ParseInt(string(reply), 10, 64) |
||||
|
return n, err |
||||
|
case nil: |
||||
|
return 0, ErrNil |
||||
|
case Error: |
||||
|
return 0, reply |
||||
|
} |
||||
|
return 0, fmt.Errorf("redigo: unexpected type for Int64, got type %T", reply) |
||||
|
} |
||||
|
|
||||
|
// errNegativeInt is returned when a negative integer reply is converted to
// an unsigned type.
var errNegativeInt = errors.New("redigo: unexpected value for Uint64")
||||
|
|
||||
|
// Uint64 is a helper that converts a command reply to 64 bit integer. If err is
|
||||
|
// not equal to nil, then Int returns 0, err. Otherwise, Int64 converts the
|
||||
|
// reply to an int64 as follows:
|
||||
|
//
|
||||
|
// Reply type Result
|
||||
|
// integer reply, nil
|
||||
|
// bulk string parsed reply, nil
|
||||
|
// nil 0, ErrNil
|
||||
|
// other 0, error
|
||||
|
func Uint64(reply interface{}, err error) (uint64, error) { |
||||
|
if err != nil { |
||||
|
return 0, err |
||||
|
} |
||||
|
switch reply := reply.(type) { |
||||
|
case int64: |
||||
|
if reply < 0 { |
||||
|
return 0, errNegativeInt |
||||
|
} |
||||
|
return uint64(reply), nil |
||||
|
case []byte: |
||||
|
n, err := strconv.ParseUint(string(reply), 10, 64) |
||||
|
return n, err |
||||
|
case nil: |
||||
|
return 0, ErrNil |
||||
|
case Error: |
||||
|
return 0, reply |
||||
|
} |
||||
|
return 0, fmt.Errorf("redigo: unexpected type for Uint64, got type %T", reply) |
||||
|
} |
||||
|
|
||||
|
// Float64 is a helper that converts a command reply to 64 bit float. If err is
|
||||
|
// not equal to nil, then Float64 returns 0, err. Otherwise, Float64 converts
|
||||
|
// the reply to an int as follows:
|
||||
|
//
|
||||
|
// Reply type Result
|
||||
|
// bulk string parsed reply, nil
|
||||
|
// nil 0, ErrNil
|
||||
|
// other 0, error
|
||||
|
func Float64(reply interface{}, err error) (float64, error) { |
||||
|
if err != nil { |
||||
|
return 0, err |
||||
|
} |
||||
|
switch reply := reply.(type) { |
||||
|
case []byte: |
||||
|
n, err := strconv.ParseFloat(string(reply), 64) |
||||
|
return n, err |
||||
|
case nil: |
||||
|
return 0, ErrNil |
||||
|
case Error: |
||||
|
return 0, reply |
||||
|
} |
||||
|
return 0, fmt.Errorf("redigo: unexpected type for Float64, got type %T", reply) |
||||
|
} |
||||
|
|
||||
|
// String is a helper that converts a command reply to a string. If err is not
|
||||
|
// equal to nil, then String returns "", err. Otherwise String converts the
|
||||
|
// reply to a string as follows:
|
||||
|
//
|
||||
|
// Reply type Result
|
||||
|
// bulk string string(reply), nil
|
||||
|
// simple string reply, nil
|
||||
|
// nil "", ErrNil
|
||||
|
// other "", error
|
||||
|
func String(reply interface{}, err error) (string, error) { |
||||
|
if err != nil { |
||||
|
return "", err |
||||
|
} |
||||
|
switch reply := reply.(type) { |
||||
|
case []byte: |
||||
|
return string(reply), nil |
||||
|
case string: |
||||
|
return reply, nil |
||||
|
case nil: |
||||
|
return "", ErrNil |
||||
|
case Error: |
||||
|
return "", reply |
||||
|
} |
||||
|
return "", fmt.Errorf("redigo: unexpected type for String, got type %T", reply) |
||||
|
} |
||||
|
|
||||
|
// Bytes is a helper that converts a command reply to a slice of bytes. If err
|
||||
|
// is not equal to nil, then Bytes returns nil, err. Otherwise Bytes converts
|
||||
|
// the reply to a slice of bytes as follows:
|
||||
|
//
|
||||
|
// Reply type Result
|
||||
|
// bulk string reply, nil
|
||||
|
// simple string []byte(reply), nil
|
||||
|
// nil nil, ErrNil
|
||||
|
// other nil, error
|
||||
|
func Bytes(reply interface{}, err error) ([]byte, error) { |
||||
|
if err != nil { |
||||
|
return nil, err |
||||
|
} |
||||
|
switch reply := reply.(type) { |
||||
|
case []byte: |
||||
|
return reply, nil |
||||
|
case string: |
||||
|
return []byte(reply), nil |
||||
|
case nil: |
||||
|
return nil, ErrNil |
||||
|
case Error: |
||||
|
return nil, reply |
||||
|
} |
||||
|
return nil, fmt.Errorf("redigo: unexpected type for Bytes, got type %T", reply) |
||||
|
} |
||||
|
|
||||
|
// Bool is a helper that converts a command reply to a boolean. If err is not
|
||||
|
// equal to nil, then Bool returns false, err. Otherwise Bool converts the
|
||||
|
// reply to boolean as follows:
|
||||
|
//
|
||||
|
// Reply type Result
|
||||
|
// integer value != 0, nil
|
||||
|
// bulk string strconv.ParseBool(reply)
|
||||
|
// nil false, ErrNil
|
||||
|
// other false, error
|
||||
|
func Bool(reply interface{}, err error) (bool, error) { |
||||
|
if err != nil { |
||||
|
return false, err |
||||
|
} |
||||
|
switch reply := reply.(type) { |
||||
|
case int64: |
||||
|
return reply != 0, nil |
||||
|
case []byte: |
||||
|
return strconv.ParseBool(string(reply)) |
||||
|
case nil: |
||||
|
return false, ErrNil |
||||
|
case Error: |
||||
|
return false, reply |
||||
|
} |
||||
|
return false, fmt.Errorf("redigo: unexpected type for Bool, got type %T", reply) |
||||
|
} |
||||
|
|
||||
|
// MultiBulk is a helper that converts an array command reply to a []interface{}.
//
// Deprecated: Use Values instead.
func MultiBulk(reply interface{}, err error) ([]interface{}, error) { return Values(reply, err) }
||||
|
|
||||
|
// Values is a helper that converts an array command reply to a []interface{}.
|
||||
|
// If err is not equal to nil, then Values returns nil, err. Otherwise, Values
|
||||
|
// converts the reply as follows:
|
||||
|
//
|
||||
|
// Reply type Result
|
||||
|
// array reply, nil
|
||||
|
// nil nil, ErrNil
|
||||
|
// other nil, error
|
||||
|
func Values(reply interface{}, err error) ([]interface{}, error) { |
||||
|
if err != nil { |
||||
|
return nil, err |
||||
|
} |
||||
|
switch reply := reply.(type) { |
||||
|
case []interface{}: |
||||
|
return reply, nil |
||||
|
case nil: |
||||
|
return nil, ErrNil |
||||
|
case Error: |
||||
|
return nil, reply |
||||
|
} |
||||
|
return nil, fmt.Errorf("redigo: unexpected type for Values, got type %T", reply) |
||||
|
} |
||||
|
|
||||
|
// Strings is a helper that converts an array command reply to a []string. If
|
||||
|
// err is not equal to nil, then Strings returns nil, err. Nil array items are
|
||||
|
// converted to "" in the output slice. Strings returns an error if an array
|
||||
|
// item is not a bulk string or nil.
|
||||
|
func Strings(reply interface{}, err error) ([]string, error) { |
||||
|
if err != nil { |
||||
|
return nil, err |
||||
|
} |
||||
|
switch reply := reply.(type) { |
||||
|
case []interface{}: |
||||
|
result := make([]string, len(reply)) |
||||
|
for i := range reply { |
||||
|
if reply[i] == nil { |
||||
|
continue |
||||
|
} |
||||
|
p, ok := reply[i].([]byte) |
||||
|
if !ok { |
||||
|
return nil, fmt.Errorf("redigo: unexpected element type for Strings, got type %T", reply[i]) |
||||
|
} |
||||
|
result[i] = string(p) |
||||
|
} |
||||
|
return result, nil |
||||
|
case nil: |
||||
|
return nil, ErrNil |
||||
|
case Error: |
||||
|
return nil, reply |
||||
|
} |
||||
|
return nil, fmt.Errorf("redigo: unexpected type for Strings, got type %T", reply) |
||||
|
} |
||||
|
|
||||
|
// ByteSlices is a helper that converts an array command reply to a [][]byte.
|
||||
|
// If err is not equal to nil, then ByteSlices returns nil, err. Nil array
|
||||
|
// items are stay nil. ByteSlices returns an error if an array item is not a
|
||||
|
// bulk string or nil.
|
||||
|
func ByteSlices(reply interface{}, err error) ([][]byte, error) { |
||||
|
if err != nil { |
||||
|
return nil, err |
||||
|
} |
||||
|
switch reply := reply.(type) { |
||||
|
case []interface{}: |
||||
|
result := make([][]byte, len(reply)) |
||||
|
for i := range reply { |
||||
|
if reply[i] == nil { |
||||
|
continue |
||||
|
} |
||||
|
p, ok := reply[i].([]byte) |
||||
|
if !ok { |
||||
|
return nil, fmt.Errorf("redigo: unexpected element type for ByteSlices, got type %T", reply[i]) |
||||
|
} |
||||
|
result[i] = p |
||||
|
} |
||||
|
return result, nil |
||||
|
case nil: |
||||
|
return nil, ErrNil |
||||
|
case Error: |
||||
|
return nil, reply |
||||
|
} |
||||
|
return nil, fmt.Errorf("redigo: unexpected type for ByteSlices, got type %T", reply) |
||||
|
} |
||||
|
|
||||
|
// Ints is a helper that converts an array command reply to a []int. If
|
||||
|
// err is not equal to nil, then Ints returns nil, err.
|
||||
|
func Ints(reply interface{}, err error) ([]int, error) { |
||||
|
var ints []int |
||||
|
values, err := Values(reply, err) |
||||
|
if err != nil { |
||||
|
return ints, err |
||||
|
} |
||||
|
if err := ScanSlice(values, &ints); err != nil { |
||||
|
return ints, err |
||||
|
} |
||||
|
return ints, nil |
||||
|
} |
||||
|
|
||||
|
// StringMap is a helper that converts an array of strings (alternating key, value)
|
||||
|
// into a map[string]string. The HGETALL and CONFIG GET commands return replies in this format.
|
||||
|
// Requires an even number of values in result.
|
||||
|
func StringMap(result interface{}, err error) (map[string]string, error) { |
||||
|
values, err := Values(result, err) |
||||
|
if err != nil { |
||||
|
return nil, err |
||||
|
} |
||||
|
if len(values)%2 != 0 { |
||||
|
return nil, errors.New("redigo: StringMap expects even number of values result") |
||||
|
} |
||||
|
m := make(map[string]string, len(values)/2) |
||||
|
for i := 0; i < len(values); i += 2 { |
||||
|
key, okKey := values[i].([]byte) |
||||
|
value, okValue := values[i+1].([]byte) |
||||
|
if !okKey || !okValue { |
||||
|
return nil, errors.New("redigo: ScanMap key not a bulk string value") |
||||
|
} |
||||
|
m[string(key)] = string(value) |
||||
|
} |
||||
|
return m, nil |
||||
|
} |
||||
|
|
||||
|
// IntMap is a helper that converts an array of strings (alternating key, value)
|
||||
|
// into a map[string]int. The HGETALL commands return replies in this format.
|
||||
|
// Requires an even number of values in result.
|
||||
|
func IntMap(result interface{}, err error) (map[string]int, error) { |
||||
|
values, err := Values(result, err) |
||||
|
if err != nil { |
||||
|
return nil, err |
||||
|
} |
||||
|
if len(values)%2 != 0 { |
||||
|
return nil, errors.New("redigo: IntMap expects even number of values result") |
||||
|
} |
||||
|
m := make(map[string]int, len(values)/2) |
||||
|
for i := 0; i < len(values); i += 2 { |
||||
|
key, ok := values[i].([]byte) |
||||
|
if !ok { |
||||
|
return nil, errors.New("redigo: ScanMap key not a bulk string value") |
||||
|
} |
||||
|
value, err := Int(values[i+1], nil) |
||||
|
if err != nil { |
||||
|
return nil, err |
||||
|
} |
||||
|
m[string(key)] = value |
||||
|
} |
||||
|
return m, nil |
||||
|
} |
||||
|
|
||||
|
// Int64Map is a helper that converts an array of strings (alternating key, value)
|
||||
|
// into a map[string]int64. The HGETALL commands return replies in this format.
|
||||
|
// Requires an even number of values in result.
|
||||
|
func Int64Map(result interface{}, err error) (map[string]int64, error) { |
||||
|
values, err := Values(result, err) |
||||
|
if err != nil { |
||||
|
return nil, err |
||||
|
} |
||||
|
if len(values)%2 != 0 { |
||||
|
return nil, errors.New("redigo: Int64Map expects even number of values result") |
||||
|
} |
||||
|
m := make(map[string]int64, len(values)/2) |
||||
|
for i := 0; i < len(values); i += 2 { |
||||
|
key, ok := values[i].([]byte) |
||||
|
if !ok { |
||||
|
return nil, errors.New("redigo: ScanMap key not a bulk string value") |
||||
|
} |
||||
|
value, err := Int64(values[i+1], nil) |
||||
|
if err != nil { |
||||
|
return nil, err |
||||
|
} |
||||
|
m[string(key)] = value |
||||
|
} |
||||
|
return m, nil |
||||
|
} |
@ -0,0 +1,179 @@ |
|||||
|
// Copyright 2012 Gary Burd
|
||||
|
//
|
||||
|
// Licensed under the Apache License, Version 2.0 (the "License"): you may
|
||||
|
// not use this file except in compliance with the License. You may obtain
|
||||
|
// a copy of the License at
|
||||
|
//
|
||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
//
|
||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||
|
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
|
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
|
// License for the specific language governing permissions and limitations
|
||||
|
// under the License.
|
||||
|
|
||||
|
package redis_test |
||||
|
|
||||
|
import ( |
||||
|
"fmt" |
||||
|
"reflect" |
||||
|
"testing" |
||||
|
|
||||
|
"github.com/garyburd/redigo/redis" |
||||
|
) |
||||
|
|
||||
|
// valueError pairs a converted reply value with the error returned
// alongside it, so table-driven tests can compare both at once.
type valueError struct {
	v   interface{}
	err error
}

// ve packages a (value, error) return pair into a valueError.
func ve(v interface{}, err error) valueError {
	return valueError{v: v, err: err}
}
||||
|
|
||||
|
// replyTests drives TestReply: each entry names a conversion, holds the
// actual (value, error) pair produced by the redis helper, and the pair the
// helper is expected to return.
var replyTests = []struct {
	name     interface{}
	actual   valueError
	expected valueError
}{
	{
		"ints([v1, v2])",
		ve(redis.Ints([]interface{}{[]byte("4"), []byte("5")}, nil)),
		ve([]int{4, 5}, nil),
	},
	{
		"ints(nil)",
		ve(redis.Ints(nil, nil)),
		ve([]int(nil), redis.ErrNil),
	},
	{
		"strings([v1, v2])",
		ve(redis.Strings([]interface{}{[]byte("v1"), []byte("v2")}, nil)),
		ve([]string{"v1", "v2"}, nil),
	},
	{
		"strings(nil)",
		ve(redis.Strings(nil, nil)),
		ve([]string(nil), redis.ErrNil),
	},
	{
		"byteslices([v1, v2])",
		ve(redis.ByteSlices([]interface{}{[]byte("v1"), []byte("v2")}, nil)),
		ve([][]byte{[]byte("v1"), []byte("v2")}, nil),
	},
	{
		"byteslices(nil)",
		ve(redis.ByteSlices(nil, nil)),
		ve([][]byte(nil), redis.ErrNil),
	},
	{
		"values([v1, v2])",
		ve(redis.Values([]interface{}{[]byte("v1"), []byte("v2")}, nil)),
		ve([]interface{}{[]byte("v1"), []byte("v2")}, nil),
	},
	{
		"values(nil)",
		ve(redis.Values(nil, nil)),
		ve([]interface{}(nil), redis.ErrNil),
	},
	{
		"float64(1.0)",
		ve(redis.Float64([]byte("1.0"), nil)),
		ve(float64(1.0), nil),
	},
	{
		"float64(nil)",
		ve(redis.Float64(nil, nil)),
		ve(float64(0.0), redis.ErrNil),
	},
	{
		"uint64(1)",
		ve(redis.Uint64(int64(1), nil)),
		ve(uint64(1), nil),
	},
	{
		"uint64(-1)",
		ve(redis.Uint64(int64(-1), nil)),
		ve(uint64(0), redis.ErrNegativeInt),
	},
}
||||
|
|
||||
|
func TestReply(t *testing.T) { |
||||
|
for _, rt := range replyTests { |
||||
|
if rt.actual.err != rt.expected.err { |
||||
|
t.Errorf("%s returned err %v, want %v", rt.name, rt.actual.err, rt.expected.err) |
||||
|
continue |
||||
|
} |
||||
|
if !reflect.DeepEqual(rt.actual.v, rt.expected.v) { |
||||
|
t.Errorf("%s=%+v, want %+v", rt.name, rt.actual.v, rt.expected.v) |
||||
|
} |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
// dial wraps DialDefaultServer() with a more suitable function name for examples.
func dial() (redis.Conn, error) {
	return redis.DialDefaultServer()
}
||||
|
|
||||
|
// ExampleBool demonstrates converting an EXISTS reply to a Go bool.
func ExampleBool() {
	c, err := dial()
	if err != nil {
		fmt.Println(err)
		return
	}
	defer c.Close()

	c.Do("SET", "foo", 1)
	exists, _ := redis.Bool(c.Do("EXISTS", "foo"))
	fmt.Printf("%#v\n", exists)
	// Output:
	// true
}
||||
|
|
||||
|
// ExampleInt demonstrates converting GET and INCR replies to Go ints.
func ExampleInt() {
	c, err := dial()
	if err != nil {
		fmt.Println(err)
		return
	}
	defer c.Close()

	c.Do("SET", "k1", 1)
	n, _ := redis.Int(c.Do("GET", "k1"))
	fmt.Printf("%#v\n", n)
	n, _ = redis.Int(c.Do("INCR", "k1"))
	fmt.Printf("%#v\n", n)
	// Output:
	// 1
	// 2
}
||||
|
|
||||
|
// ExampleInts demonstrates converting an SMEMBERS array reply to []int.
func ExampleInts() {
	c, err := dial()
	if err != nil {
		fmt.Println(err)
		return
	}
	defer c.Close()

	c.Do("SADD", "set_with_integers", 4, 5, 6)
	ints, _ := redis.Ints(c.Do("SMEMBERS", "set_with_integers"))
	fmt.Printf("%#v\n", ints)
	// Output:
	// []int{4, 5, 6}
}
||||
|
|
||||
|
// ExampleString demonstrates converting a GET reply to a Go string.
func ExampleString() {
	c, err := dial()
	if err != nil {
		fmt.Println(err)
		return
	}
	defer c.Close()

	c.Do("SET", "hello", "world")
	// err from String is deliberately unchecked here; on failure s would be
	// "" and the example output would not match.
	s, err := redis.String(c.Do("GET", "hello"))
	fmt.Printf("%#v\n", s)
	// Output:
	// "world"
}
@ -0,0 +1,555 @@ |
|||||
|
// Copyright 2012 Gary Burd
|
||||
|
//
|
||||
|
// Licensed under the Apache License, Version 2.0 (the "License"): you may
|
||||
|
// not use this file except in compliance with the License. You may obtain
|
||||
|
// a copy of the License at
|
||||
|
//
|
||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
//
|
||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||
|
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
|
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
|
// License for the specific language governing permissions and limitations
|
||||
|
// under the License.
|
||||
|
|
||||
|
package redis |
||||
|
|
||||
|
import ( |
||||
|
"errors" |
||||
|
"fmt" |
||||
|
"reflect" |
||||
|
"strconv" |
||||
|
"strings" |
||||
|
"sync" |
||||
|
) |
||||
|
|
||||
|
func ensureLen(d reflect.Value, n int) { |
||||
|
if n > d.Cap() { |
||||
|
d.Set(reflect.MakeSlice(d.Type(), n, n)) |
||||
|
} else { |
||||
|
d.SetLen(n) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func cannotConvert(d reflect.Value, s interface{}) error { |
||||
|
var sname string |
||||
|
switch s.(type) { |
||||
|
case string: |
||||
|
sname = "Redis simple string" |
||||
|
case Error: |
||||
|
sname = "Redis error" |
||||
|
case int64: |
||||
|
sname = "Redis integer" |
||||
|
case []byte: |
||||
|
sname = "Redis bulk string" |
||||
|
case []interface{}: |
||||
|
sname = "Redis array" |
||||
|
default: |
||||
|
sname = reflect.TypeOf(s).String() |
||||
|
} |
||||
|
return fmt.Errorf("cannot convert from %s to %s", sname, d.Type()) |
||||
|
} |
||||
|
|
||||
|
// convertAssignBulkString copies the Redis bulk string s into the settable
// destination value d, parsing with the standard strconv package for
// numeric and boolean destinations. Supported destination kinds are float,
// signed/unsigned integer, bool, string and []byte; any other kind yields a
// conversion error. Parse failures are returned in err, with d left holding
// whatever the failed Set* call stored (the parsed zero value).
func convertAssignBulkString(d reflect.Value, s []byte) (err error) {
	switch d.Type().Kind() {
	case reflect.Float32, reflect.Float64:
		var x float64
		x, err = strconv.ParseFloat(string(s), d.Type().Bits())
		d.SetFloat(x)
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		var x int64
		x, err = strconv.ParseInt(string(s), 10, d.Type().Bits())
		d.SetInt(x)
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		var x uint64
		x, err = strconv.ParseUint(string(s), 10, d.Type().Bits())
		d.SetUint(x)
	case reflect.Bool:
		var x bool
		x, err = strconv.ParseBool(string(s))
		d.SetBool(x)
	case reflect.String:
		d.SetString(string(s))
	case reflect.Slice:
		// Only byte slices are valid slice destinations for a bulk string.
		if d.Type().Elem().Kind() != reflect.Uint8 {
			err = cannotConvert(d, s)
		} else {
			d.SetBytes(s)
		}
	default:
		err = cannotConvert(d, s)
	}
	return
}
||||
|
|
||||
|
func convertAssignInt(d reflect.Value, s int64) (err error) { |
||||
|
switch d.Type().Kind() { |
||||
|
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: |
||||
|
d.SetInt(s) |
||||
|
if d.Int() != s { |
||||
|
err = strconv.ErrRange |
||||
|
d.SetInt(0) |
||||
|
} |
||||
|
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: |
||||
|
if s < 0 { |
||||
|
err = strconv.ErrRange |
||||
|
} else { |
||||
|
x := uint64(s) |
||||
|
d.SetUint(x) |
||||
|
if d.Uint() != x { |
||||
|
err = strconv.ErrRange |
||||
|
d.SetUint(0) |
||||
|
} |
||||
|
} |
||||
|
case reflect.Bool: |
||||
|
d.SetBool(s != 0) |
||||
|
default: |
||||
|
err = cannotConvert(d, s) |
||||
|
} |
||||
|
return |
||||
|
} |
||||
|
|
||||
|
func convertAssignValue(d reflect.Value, s interface{}) (err error) { |
||||
|
switch s := s.(type) { |
||||
|
case []byte: |
||||
|
err = convertAssignBulkString(d, s) |
||||
|
case int64: |
||||
|
err = convertAssignInt(d, s) |
||||
|
default: |
||||
|
err = cannotConvert(d, s) |
||||
|
} |
||||
|
return err |
||||
|
} |
||||
|
|
||||
|
// convertAssignArray copies the Redis array reply s into the slice value d,
// converting each element with convertAssignValue. d must be a slice; the
// first element that fails to convert aborts the copy and its error is
// returned.
func convertAssignArray(d reflect.Value, s []interface{}) error {
	if d.Type().Kind() != reflect.Slice {
		return cannotConvert(d, s)
	}
	// Resize d to match the reply, reusing its backing array when possible.
	ensureLen(d, len(s))
	for i := 0; i < len(s); i++ {
		if err := convertAssignValue(d.Index(i), s[i]); err != nil {
			return err
		}
	}
	return nil
}
||||
|
|
||||
|
// convertAssign copies the reply value s into the destination d.
//
// The most common destination types are handled directly with type switches
// and all other pointer destinations fall back to the reflection-based
// converters above. A nil destination skips the value; a nil source leaves
// the destination unmodified; an Error reply is returned as err.
func convertAssign(d interface{}, s interface{}) (err error) {
	switch s := s.(type) {
	case nil:
		// ignore: a nil reply leaves the destination unmodified.
	case []byte:
		switch d := d.(type) {
		case *string:
			*d = string(s)
		case *int:
			*d, err = strconv.Atoi(string(s))
		case *bool:
			*d, err = strconv.ParseBool(string(s))
		case *[]byte:
			*d = s
		case *interface{}:
			*d = s
		case nil:
			// skip value
		default:
			if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr {
				err = cannotConvert(d, s)
			} else {
				err = convertAssignBulkString(d.Elem(), s)
			}
		}
	case int64:
		switch d := d.(type) {
		case *int:
			// Manual range check mirrors convertAssignInt for the fast path.
			x := int(s)
			if int64(x) != s {
				err = strconv.ErrRange
				x = 0
			}
			*d = x
		case *bool:
			*d = s != 0
		case *interface{}:
			*d = s
		case nil:
			// skip value
		default:
			if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr {
				err = cannotConvert(d, s)
			} else {
				err = convertAssignInt(d.Elem(), s)
			}
		}
	case string:
		// Simple string replies convert to *string only.
		switch d := d.(type) {
		case *string:
			*d = string(s)
		default:
			err = cannotConvert(reflect.ValueOf(d), s)
		}
	case []interface{}:
		switch d := d.(type) {
		case *[]interface{}:
			*d = s
		case *interface{}:
			*d = s
		case nil:
			// skip value
		default:
			if d := reflect.ValueOf(d); d.Type().Kind() != reflect.Ptr {
				err = cannotConvert(d, s)
			} else {
				err = convertAssignArray(d.Elem(), s)
			}
		}
	case Error:
		// Error replies propagate as the assignment error.
		err = s
	default:
		err = cannotConvert(reflect.ValueOf(d), s)
	}
	return
}
||||
|
|
||||
|
// Scan copies from src to the values pointed at by dest.
//
// The values pointed at by dest must be an integer, float, boolean, string,
// []byte, interface{} or slices of these types. Scan uses the standard strconv
// package to convert bulk strings to numeric and boolean types.
//
// If a dest value is nil, then the corresponding src value is skipped.
//
// If a src element is nil, then the corresponding dest value is not modified.
//
// To enable easy use of Scan in a loop, Scan returns the slice of src
// following the copied values.
func Scan(src []interface{}, dest ...interface{}) ([]interface{}, error) {
	if len(src) < len(dest) {
		return nil, errors.New("redigo.Scan: array short")
	}
	var err error
	for i, d := range dest {
		err = convertAssign(d, src[i])
		if err != nil {
			// Wrap with the destination index to aid debugging; stop at the
			// first failure but still return the remaining src slice.
			err = fmt.Errorf("redigo.Scan: cannot assign to dest %d: %v", i, err)
			break
		}
	}
	return src[len(dest):], err
}
||||
|
|
||||
|
// fieldSpec describes how a single struct field maps to a Redis hash field.
type fieldSpec struct {
	name      string // Redis field name (struct field name or `redis` tag)
	index     []int  // index path for reflect.Value.FieldByIndex
	omitEmpty bool   // skip zero values when flattening with Args.AddFlat
}

// structSpec caches the compiled field mapping for one struct type.
type structSpec struct {
	m map[string]*fieldSpec // lookup by Redis field name
	l []*fieldSpec          // fields in declaration order
}

// fieldSpec returns the spec for the given Redis field name, or nil when the
// name does not correspond to a mapped struct field.
func (ss *structSpec) fieldSpec(name []byte) *fieldSpec {
	return ss.m[string(name)]
}
||||
|
|
||||
|
// compileStructSpec records the Redis field mapping for struct type t into
// ss, recursing into embedded structs. depth tracks the shallowest embedding
// level seen for each field name so that name conflicts follow Go's usual
// shadowing rules: a shallower field wins, and two fields at the same depth
// cancel each other out. index is the field index path of the struct
// currently being walked.
func compileStructSpec(t reflect.Type, depth map[string]int, index []int, ss *structSpec) {
	for i := 0; i < t.NumField(); i++ {
		f := t.Field(i)
		switch {
		case f.PkgPath != "" && !f.Anonymous:
			// Ignore unexported fields.
		case f.Anonymous:
			// TODO: Handle pointers. Requires change to decoder and
			// protection against infinite recursion.
			if f.Type.Kind() == reflect.Struct {
				compileStructSpec(f.Type, depth, append(index, i), ss)
			}
		default:
			fs := &fieldSpec{name: f.Name}
			tag := f.Tag.Get("redis")
			p := strings.Split(tag, ",")
			if len(p) > 0 {
				// A leading "-" excludes the field entirely; any other
				// non-empty first element overrides the field name.
				if p[0] == "-" {
					continue
				}
				if len(p[0]) > 0 {
					fs.name = p[0]
				}
				for _, s := range p[1:] {
					switch s {
					case "omitempty":
						fs.omitEmpty = true
					default:
						panic(fmt.Errorf("redigo: unknown field tag %s for type %s", s, t.Name()))
					}
				}
			}
			d, found := depth[fs.name]
			if !found {
				// Sentinel "deeper than anything real" depth.
				d = 1 << 30
			}
			switch {
			case len(index) == d:
				// At same depth, remove from result.
				delete(ss.m, fs.name)
				j := 0
				for i := 0; i < len(ss.l); i++ {
					if fs.name != ss.l[i].name {
						ss.l[j] = ss.l[i]
						j += 1
					}
				}
				ss.l = ss.l[:j]
			case len(index) < d:
				// Shallower than any previous field of this name: record it.
				fs.index = make([]int, len(index)+1)
				copy(fs.index, index)
				fs.index[len(index)] = i
				depth[fs.name] = len(index)
				ss.m[fs.name] = fs
				ss.l = append(ss.l, fs)
			}
		}
	}
}
||||
|
|
||||
|
var (
	structSpecMutex  sync.RWMutex                        // guards structSpecCache
	structSpecCache  = make(map[reflect.Type]*structSpec) // compiled specs by type
	defaultFieldSpec = &fieldSpec{}
)

// structSpecForType returns the cached structSpec for t, compiling and
// caching it on first use. Safe for concurrent use.
func structSpecForType(t reflect.Type) *structSpec {

	structSpecMutex.RLock()
	ss, found := structSpecCache[t]
	structSpecMutex.RUnlock()
	if found {
		return ss
	}

	structSpecMutex.Lock()
	defer structSpecMutex.Unlock()
	// Re-check under the write lock: another goroutine may have compiled the
	// spec while we were waiting.
	ss, found = structSpecCache[t]
	if found {
		return ss
	}

	ss = &structSpec{m: make(map[string]*fieldSpec)}
	compileStructSpec(t, make(map[string]int), nil, ss)
	structSpecCache[t] = ss
	return ss
}
||||
|
|
||||
|
// errScanStructValue is returned when dest is not a usable struct pointer.
var errScanStructValue = errors.New("redigo.ScanStruct: value must be non-nil pointer to a struct")

// ScanStruct scans alternating names and values from src to a struct. The
// HGETALL and CONFIG GET commands return replies in this format.
//
// ScanStruct uses exported field names to match values in the response. Use
// 'redis' field tag to override the name:
//
//	Field int `redis:"myName"`
//
// Fields with the tag redis:"-" are ignored.
//
// Integer, float, boolean, string and []byte fields are supported. Scan uses the
// standard strconv package to convert bulk string values to numeric and
// boolean types.
//
// If a src element is nil, then the corresponding field is not modified.
func ScanStruct(src []interface{}, dest interface{}) error {
	d := reflect.ValueOf(dest)
	if d.Kind() != reflect.Ptr || d.IsNil() {
		return errScanStructValue
	}
	d = d.Elem()
	if d.Kind() != reflect.Struct {
		return errScanStructValue
	}
	ss := structSpecForType(d.Type())

	// Replies are name/value pairs, so the length must be even.
	if len(src)%2 != 0 {
		return errors.New("redigo.ScanStruct: number of values not a multiple of 2")
	}

	for i := 0; i < len(src); i += 2 {
		s := src[i+1]
		if s == nil {
			// A nil reply leaves the corresponding field unmodified.
			continue
		}
		name, ok := src[i].([]byte)
		if !ok {
			return fmt.Errorf("redigo.ScanStruct: key %d not a bulk string value", i)
		}
		fs := ss.fieldSpec(name)
		if fs == nil {
			// Field names with no matching struct field are silently skipped.
			continue
		}
		if err := convertAssignValue(d.FieldByIndex(fs.index), s); err != nil {
			return fmt.Errorf("redigo.ScanStruct: cannot assign field %s: %v", fs.name, err)
		}
	}
	return nil
}
||||
|
|
||||
|
var (
	// errScanSliceValue is returned when dest is not a usable destination.
	// ScanSlice requires a non-nil pointer to a slice; the previous message
	// incorrectly said "pointer to a struct".
	errScanSliceValue = errors.New("redigo.ScanSlice: dest must be non-nil pointer to a slice")
)
||||
|
|
||||
|
// ScanSlice scans src to the slice pointed to by dest. The elements the dest
// slice must be integer, float, boolean, string, struct or pointer to struct
// values.
//
// Struct fields must be integer, float, boolean or string values. All struct
// fields are used unless a subset is specified using fieldNames.
func ScanSlice(src []interface{}, dest interface{}, fieldNames ...string) error {
	d := reflect.ValueOf(dest)
	if d.Kind() != reflect.Ptr || d.IsNil() {
		return errScanSliceValue
	}
	d = d.Elem()
	if d.Kind() != reflect.Slice {
		return errScanSliceValue
	}

	// Unwrap a pointer-to-struct element type so the struct path below can
	// work on the struct itself.
	isPtr := false
	t := d.Type().Elem()
	if t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct {
		isPtr = true
		t = t.Elem()
	}

	if t.Kind() != reflect.Struct {
		// Scalar element type: one src value per destination element.
		ensureLen(d, len(src))
		for i, s := range src {
			if s == nil {
				continue
			}
			if err := convertAssignValue(d.Index(i), s); err != nil {
				return fmt.Errorf("redigo.ScanSlice: cannot assign element %d: %v", i, err)
			}
		}
		return nil
	}

	// Struct element type: src is consumed in groups of len(fss) values,
	// one group per destination element.
	ss := structSpecForType(t)
	fss := ss.l
	if len(fieldNames) > 0 {
		fss = make([]*fieldSpec, len(fieldNames))
		for i, name := range fieldNames {
			fss[i] = ss.m[name]
			if fss[i] == nil {
				return fmt.Errorf("redigo.ScanSlice: ScanSlice bad field name %s", name)
			}
		}
	}

	if len(fss) == 0 {
		return errors.New("redigo.ScanSlice: no struct fields")
	}

	n := len(src) / len(fss)
	if n*len(fss) != len(src) {
		return errors.New("redigo.ScanSlice: length not a multiple of struct field count")
	}

	ensureLen(d, n)
	for i := 0; i < n; i++ {
		d := d.Index(i)
		if isPtr {
			// Allocate the struct when the slice element pointer is nil.
			if d.IsNil() {
				d.Set(reflect.New(t))
			}
			d = d.Elem()
		}
		for j, fs := range fss {
			s := src[i*len(fss)+j]
			if s == nil {
				continue
			}
			if err := convertAssignValue(d.FieldByIndex(fs.index), s); err != nil {
				return fmt.Errorf("redigo.ScanSlice: cannot assign element %d to field %s: %v", i*len(fss)+j, fs.name, err)
			}
		}
	}
	return nil
}
||||
|
|
||||
|
// Args is a helper for constructing command arguments from structured values.
type Args []interface{}

// Add returns the result of appending value to args. The receiver is not
// modified; callers use the returned slice.
func (args Args) Add(value ...interface{}) Args {
	return append(args, value...)
}
||||
|
|
||||
|
// AddFlat returns the result of appending the flattened value of v to args.
//
// Maps are flattened by appending the alternating keys and map values to args.
//
// Slices are flattened by appending the slice elements to args.
//
// Structs are flattened by appending the alternating names and values of
// exported fields to args. If v is a nil struct pointer, then nothing is
// appended. The 'redis' field tag overrides struct field names. See ScanStruct
// for more information on the use of the 'redis' field tag.
//
// Other types are appended to args as is.
func (args Args) AddFlat(v interface{}) Args {
	rv := reflect.ValueOf(v)
	switch rv.Kind() {
	case reflect.Struct:
		args = flattenStruct(args, rv)
	case reflect.Slice:
		for i := 0; i < rv.Len(); i++ {
			args = append(args, rv.Index(i).Interface())
		}
	case reflect.Map:
		for _, k := range rv.MapKeys() {
			args = append(args, k.Interface(), rv.MapIndex(k).Interface())
		}
	case reflect.Ptr:
		if rv.Type().Elem().Kind() == reflect.Struct {
			// A nil struct pointer contributes nothing.
			if !rv.IsNil() {
				args = flattenStruct(args, rv.Elem())
			}
		} else {
			args = append(args, v)
		}
	default:
		args = append(args, v)
	}
	return args
}
||||
|
|
||||
|
// flattenStruct appends the alternating field names and values of struct v
// to args, honoring the omitempty tag option by skipping fields whose value
// is the zero value for their kind.
func flattenStruct(args Args, v reflect.Value) Args {
	ss := structSpecForType(v.Type())
	for _, fs := range ss.l {
		fv := v.FieldByIndex(fs.index)
		if fs.omitEmpty {
			var empty = false
			switch fv.Kind() {
			case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
				empty = fv.Len() == 0
			case reflect.Bool:
				empty = !fv.Bool()
			case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
				empty = fv.Int() == 0
			case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
				empty = fv.Uint() == 0
			case reflect.Float32, reflect.Float64:
				empty = fv.Float() == 0
			case reflect.Interface, reflect.Ptr:
				empty = fv.IsNil()
			}
			if empty {
				continue
			}
		}
		args = append(args, fs.name, fv.Interface())
	}
	return args
}
@ -0,0 +1,440 @@ |
|||||
|
// Copyright 2012 Gary Burd
|
||||
|
//
|
||||
|
// Licensed under the Apache License, Version 2.0 (the "License"): you may
|
||||
|
// not use this file except in compliance with the License. You may obtain
|
||||
|
// a copy of the License at
|
||||
|
//
|
||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
//
|
||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||
|
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
|
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
|
// License for the specific language governing permissions and limitations
|
||||
|
// under the License.
|
||||
|
|
||||
|
package redis_test |
||||
|
|
||||
|
import ( |
||||
|
"fmt" |
||||
|
"math" |
||||
|
"reflect" |
||||
|
"testing" |
||||
|
|
||||
|
"github.com/garyburd/redigo/redis" |
||||
|
) |
||||
|
|
||||
|
// scanConversionTests pairs a raw reply value (src) with the value expected
// after scanning into a destination of dest's type.
var scanConversionTests = []struct {
	src  interface{}
	dest interface{}
}{
	{[]byte("-inf"), math.Inf(-1)},
	{[]byte("+inf"), math.Inf(1)},
	{[]byte("0"), float64(0)},
	{[]byte("3.14159"), float64(3.14159)},
	{[]byte("3.14"), float32(3.14)},
	{[]byte("-100"), int(-100)},
	{[]byte("101"), int(101)},
	{int64(102), int(102)},
	{[]byte("103"), uint(103)},
	{int64(104), uint(104)},
	{[]byte("105"), int8(105)},
	{int64(106), int8(106)},
	{[]byte("107"), uint8(107)},
	{int64(108), uint8(108)},
	{[]byte("0"), false},
	{int64(0), false},
	{[]byte("f"), false},
	{[]byte("1"), true},
	{int64(1), true},
	{[]byte("t"), true},
	{"hello", "hello"},
	{[]byte("hello"), "hello"},
	{[]byte("world"), []byte("world")},
	{[]interface{}{[]byte("foo")}, []interface{}{[]byte("foo")}},
	{[]interface{}{[]byte("foo")}, []string{"foo"}},
	{[]interface{}{[]byte("hello"), []byte("world")}, []string{"hello", "world"}},
	{[]interface{}{[]byte("bar")}, [][]byte{[]byte("bar")}},
	{[]interface{}{[]byte("1")}, []int{1}},
	{[]interface{}{[]byte("1"), []byte("2")}, []int{1, 2}},
	{[]interface{}{[]byte("1"), []byte("2")}, []float64{1, 2}},
	{[]interface{}{[]byte("1")}, []byte{1}},
	{[]interface{}{[]byte("1")}, []bool{true}},
}

// TestScanConversion verifies that Scan converts each src value to the
// expected dest value for every supported destination type.
func TestScanConversion(t *testing.T) {
	for _, tt := range scanConversionTests {
		values := []interface{}{tt.src}
		// Build a fresh destination of the expected type for each case.
		dest := reflect.New(reflect.TypeOf(tt.dest))
		values, err := redis.Scan(values, dest.Interface())
		if err != nil {
			t.Errorf("Scan(%v) returned error %v", tt, err)
			continue
		}
		if !reflect.DeepEqual(tt.dest, dest.Elem().Interface()) {
			t.Errorf("Scan(%v) returned %v, want %v", tt, dest.Elem().Interface(), tt.dest)
		}
	}
}
||||
|
|
||||
|
// scanConversionErrorTests lists src/dest pairs for which Scan must fail:
// out-of-range integers, unparsable booleans and error replies.
var scanConversionErrorTests = []struct {
	src  interface{}
	dest interface{}
}{
	{[]byte("1234"), byte(0)},
	{int64(1234), byte(0)},
	{[]byte("-1"), byte(0)},
	{int64(-1), byte(0)},
	{[]byte("junk"), false},
	{redis.Error("blah"), false},
}

// TestScanConversionError verifies that Scan reports an error for each pair.
func TestScanConversionError(t *testing.T) {
	for _, tt := range scanConversionErrorTests {
		values := []interface{}{tt.src}
		dest := reflect.New(reflect.TypeOf(tt.dest))
		values, err := redis.Scan(values, dest.Interface())
		if err == nil {
			t.Errorf("Scan(%v) did not return error", tt)
		}
	}
}
||||
|
|
||||
|
// ExampleScan shows consuming a flat SORT reply pairwise in a loop, using the
// remainder slice Scan returns to drive the iteration.
func ExampleScan() {
	c, err := dial()
	if err != nil {
		fmt.Println(err)
		return
	}
	defer c.Close()

	c.Send("HMSET", "album:1", "title", "Red", "rating", 5)
	c.Send("HMSET", "album:2", "title", "Earthbound", "rating", 1)
	c.Send("HMSET", "album:3", "title", "Beat")
	c.Send("LPUSH", "albums", "1")
	c.Send("LPUSH", "albums", "2")
	c.Send("LPUSH", "albums", "3")
	values, err := redis.Values(c.Do("SORT", "albums",
		"BY", "album:*->rating",
		"GET", "album:*->title",
		"GET", "album:*->rating"))
	if err != nil {
		fmt.Println(err)
		return
	}

	for len(values) > 0 {
		var title string
		rating := -1 // initialize to illegal value to detect nil.
		values, err = redis.Scan(values, &title, &rating)
		if err != nil {
			fmt.Println(err)
			return
		}
		if rating == -1 {
			fmt.Println(title, "not-rated")
		} else {
			fmt.Println(title, rating)
		}
	}
	// Output:
	// Beat not-rated
	// Earthbound 1
	// Red 5
}
||||
|
|
||||
|
// s0 is embedded in s1 to exercise scanning into embedded struct fields,
// including name shadowing between the outer and embedded levels.
type s0 struct {
	X  int
	Y  int `redis:"y"`
	Bt bool
}

// s1 covers ignored fields ("-" tag), renamed fields, untagged fields and an
// embedded struct.
type s1 struct {
	X  int    `redis:"-"`
	I  int    `redis:"i"`
	U  uint   `redis:"u"`
	S  string `redis:"s"`
	P  []byte `redis:"p"`
	B  bool   `redis:"b"`
	Bt bool
	Bf bool
	s0
}

// scanStructTests pairs an HGETALL-style flat reply with the struct value
// ScanStruct is expected to produce.
var scanStructTests = []struct {
	title string
	reply []string
	value interface{}
}{
	{"basic",
		[]string{"i", "-1234", "u", "5678", "s", "hello", "p", "world", "b", "t", "Bt", "1", "Bf", "0", "X", "123", "y", "456"},
		&s1{I: -1234, U: 5678, S: "hello", P: []byte("world"), B: true, Bt: true, Bf: false, s0: s0{X: 123, Y: 456}},
	},
}

// TestScanStruct verifies ScanStruct against each fixture above.
func TestScanStruct(t *testing.T) {
	for _, tt := range scanStructTests {

		// Replies arrive from the server as bulk strings ([]byte).
		var reply []interface{}
		for _, v := range tt.reply {
			reply = append(reply, []byte(v))
		}

		value := reflect.New(reflect.ValueOf(tt.value).Type().Elem())

		if err := redis.ScanStruct(reply, value.Interface()); err != nil {
			t.Fatalf("ScanStruct(%s) returned error %v", tt.title, err)
		}

		if !reflect.DeepEqual(value.Interface(), tt.value) {
			t.Fatalf("ScanStruct(%s) returned %v, want %v", tt.title, value.Interface(), tt.value)
		}
	}
}
||||
|
|
||||
|
func TestBadScanStructArgs(t *testing.T) { |
||||
|
x := []interface{}{"A", "b"} |
||||
|
test := func(v interface{}) { |
||||
|
if err := redis.ScanStruct(x, v); err == nil { |
||||
|
t.Errorf("Expect error for ScanStruct(%T, %T)", x, v) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
test(nil) |
||||
|
|
||||
|
var v0 *struct{} |
||||
|
test(v0) |
||||
|
|
||||
|
var v1 int |
||||
|
test(&v1) |
||||
|
|
||||
|
x = x[:1] |
||||
|
v2 := struct{ A string }{} |
||||
|
test(&v2) |
||||
|
} |
||||
|
|
||||
|
// scanSliceTests drives TestScanSlice: src is the flat reply, fieldNames an
// optional field subset, ok whether the call must succeed, and dest both the
// destination element type and (when ok) the expected result.
var scanSliceTests = []struct {
	src        []interface{}
	fieldNames []string
	ok         bool
	dest       interface{}
}{
	{
		[]interface{}{[]byte("1"), nil, []byte("-1")},
		nil,
		true,
		[]int{1, 0, -1},
	},
	{
		[]interface{}{[]byte("1"), nil, []byte("2")},
		nil,
		true,
		[]uint{1, 0, 2},
	},
	{
		[]interface{}{[]byte("-1")},
		nil,
		false,
		[]uint{1},
	},
	{
		[]interface{}{[]byte("hello"), nil, []byte("world")},
		nil,
		true,
		[][]byte{[]byte("hello"), nil, []byte("world")},
	},
	{
		[]interface{}{[]byte("hello"), nil, []byte("world")},
		nil,
		true,
		[]string{"hello", "", "world"},
	},
	{
		[]interface{}{[]byte("a1"), []byte("b1"), []byte("a2"), []byte("b2")},
		nil,
		true,
		[]struct{ A, B string }{{"a1", "b1"}, {"a2", "b2"}},
	},
	{
		[]interface{}{[]byte("a1"), []byte("b1")},
		nil,
		false,
		[]struct{ A, B, C string }{{"a1", "b1", ""}},
	},
	{
		[]interface{}{[]byte("a1"), []byte("b1"), []byte("a2"), []byte("b2")},
		nil,
		true,
		[]*struct{ A, B string }{{"a1", "b1"}, {"a2", "b2"}},
	},
	{
		[]interface{}{[]byte("a1"), []byte("b1"), []byte("a2"), []byte("b2")},
		[]string{"A", "B"},
		true,
		[]struct{ A, C, B string }{{"a1", "", "b1"}, {"a2", "", "b2"}},
	},
	{
		[]interface{}{[]byte("a1"), []byte("b1"), []byte("a2"), []byte("b2")},
		nil,
		false,
		[]struct{}{},
	},
}

// TestScanSlice verifies ScanSlice against each fixture above.
func TestScanSlice(t *testing.T) {
	for _, tt := range scanSliceTests {

		typ := reflect.ValueOf(tt.dest).Type()
		dest := reflect.New(typ)

		err := redis.ScanSlice(tt.src, dest.Interface(), tt.fieldNames...)
		if tt.ok != (err == nil) {
			t.Errorf("ScanSlice(%v, []%s, %v) returned error %v", tt.src, typ, tt.fieldNames, err)
			continue
		}
		if tt.ok && !reflect.DeepEqual(dest.Elem().Interface(), tt.dest) {
			t.Errorf("ScanSlice(src, []%s) returned %#v, want %#v", typ, dest.Elem().Interface(), tt.dest)
		}
	}
}
||||
|
|
||||
|
func ExampleScanSlice() { |
||||
|
c, err := dial() |
||||
|
if err != nil { |
||||
|
fmt.Println(err) |
||||
|
return |
||||
|
} |
||||
|
defer c.Close() |
||||
|
|
||||
|
c.Send("HMSET", "album:1", "title", "Red", "rating", 5) |
||||
|
c.Send("HMSET", "album:2", "title", "Earthbound", "rating", 1) |
||||
|
c.Send("HMSET", "album:3", "title", "Beat", "rating", 4) |
||||
|
c.Send("LPUSH", "albums", "1") |
||||
|
c.Send("LPUSH", "albums", "2") |
||||
|
c.Send("LPUSH", "albums", "3") |
||||
|
values, err := redis.Values(c.Do("SORT", "albums", |
||||
|
"BY", "album:*->rating", |
||||
|
"GET", "album:*->title", |
||||
|
"GET", "album:*->rating")) |
||||
|
if err != nil { |
||||
|
fmt.Println(err) |
||||
|
return |
||||
|
} |
||||
|
|
||||
|
var albums []struct { |
||||
|
Title string |
||||
|
Rating int |
||||
|
} |
||||
|
if err := redis.ScanSlice(values, &albums); err != nil { |
||||
|
fmt.Println(err) |
||||
|
return |
||||
|
} |
||||
|
fmt.Printf("%v\n", albums) |
||||
|
// Output:
|
||||
|
// [{Earthbound 1} {Beat 4} {Red 5}]
|
||||
|
} |
||||
|
|
||||
|
// argsTests checks Args.Add/AddFlat flattening for struct pointers, plain
// structs, slices and omitempty-tagged structs.
var argsTests = []struct {
	title    string
	actual   redis.Args
	expected redis.Args
}{
	{"struct ptr",
		redis.Args{}.AddFlat(&struct {
			I  int               `redis:"i"`
			U  uint              `redis:"u"`
			S  string            `redis:"s"`
			P  []byte            `redis:"p"`
			M  map[string]string `redis:"m"`
			Bt bool
			Bf bool
		}{
			-1234, 5678, "hello", []byte("world"), map[string]string{"hello": "world"}, true, false,
		}),
		redis.Args{"i", int(-1234), "u", uint(5678), "s", "hello", "p", []byte("world"), "m", map[string]string{"hello": "world"}, "Bt", true, "Bf", false},
	},
	{"struct",
		redis.Args{}.AddFlat(struct{ I int }{123}),
		redis.Args{"I", 123},
	},
	{"slice",
		redis.Args{}.Add(1).AddFlat([]string{"a", "b", "c"}).Add(2),
		redis.Args{1, "a", "b", "c", 2},
	},
	{"struct omitempty",
		redis.Args{}.AddFlat(&struct {
			I  int               `redis:"i,omitempty"`
			U  uint              `redis:"u,omitempty"`
			S  string            `redis:"s,omitempty"`
			P  []byte            `redis:"p,omitempty"`
			M  map[string]string `redis:"m,omitempty"`
			Bt bool              `redis:"Bt,omitempty"`
			Bf bool              `redis:"Bf,omitempty"`
		}{
			0, 0, "", []byte{}, map[string]string{}, true, false,
		}),
		redis.Args{"Bt", true},
	},
}

// TestArgs compares each fixture's actual flattened arguments to the
// expected list.
func TestArgs(t *testing.T) {
	for _, tt := range argsTests {
		if !reflect.DeepEqual(tt.actual, tt.expected) {
			t.Fatalf("%s is %v, want %v", tt.title, tt.actual, tt.expected)
		}
	}
}
||||
|
|
||||
|
// ExampleArgs builds HMSET argument lists from a struct and from a map with
// Args.AddFlat, then reads the hashes back with ScanStruct.
func ExampleArgs() {
	c, err := dial()
	if err != nil {
		fmt.Println(err)
		return
	}
	defer c.Close()

	var p1, p2 struct {
		Title  string `redis:"title"`
		Author string `redis:"author"`
		Body   string `redis:"body"`
	}

	p1.Title = "Example"
	p1.Author = "Gary"
	p1.Body = "Hello"

	if _, err := c.Do("HMSET", redis.Args{}.Add("id1").AddFlat(&p1)...); err != nil {
		fmt.Println(err)
		return
	}

	m := map[string]string{
		"title":  "Example2",
		"author": "Steve",
		"body":   "Map",
	}

	if _, err := c.Do("HMSET", redis.Args{}.Add("id2").AddFlat(m)...); err != nil {
		fmt.Println(err)
		return
	}

	for _, id := range []string{"id1", "id2"} {

		v, err := redis.Values(c.Do("HGETALL", id))
		if err != nil {
			fmt.Println(err)
			return
		}

		if err := redis.ScanStruct(v, &p2); err != nil {
			fmt.Println(err)
			return
		}

		fmt.Printf("%+v\n", p2)
	}

	// Output:
	// {Title:Example Author:Gary Body:Hello}
	// {Title:Example2 Author:Steve Body:Map}
}
@ -0,0 +1,86 @@ |
|||||
|
// Copyright 2012 Gary Burd
|
||||
|
//
|
||||
|
// Licensed under the Apache License, Version 2.0 (the "License"): you may
|
||||
|
// not use this file except in compliance with the License. You may obtain
|
||||
|
// a copy of the License at
|
||||
|
//
|
||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
//
|
||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||
|
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
|
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
|
// License for the specific language governing permissions and limitations
|
||||
|
// under the License.
|
||||
|
|
||||
|
package redis |
||||
|
|
||||
|
import ( |
||||
|
"crypto/sha1" |
||||
|
"encoding/hex" |
||||
|
"io" |
||||
|
"strings" |
||||
|
) |
||||
|
|
||||
|
// Script encapsulates the source, hash and key count for a Lua script. See
// http://redis.io/commands/eval for information on scripts in Redis.
type Script struct {
	keyCount int    // key count inserted into EVAL/EVALSHA; < 0 means caller-supplied
	src      string // Lua source, sent with EVAL when the script is not loaded
	hash     string // hex-encoded SHA1 of src, sent with EVALSHA
}
||||
|
|
||||
|
// NewScript returns a new script object. If keyCount is greater than or equal
|
||||
|
// to zero, then the count is automatically inserted in the EVAL command
|
||||
|
// argument list. If keyCount is less than zero, then the application supplies
|
||||
|
// the count as the first value in the keysAndArgs argument to the Do, Send and
|
||||
|
// SendHash methods.
|
||||
|
func NewScript(keyCount int, src string) *Script { |
||||
|
h := sha1.New() |
||||
|
io.WriteString(h, src) |
||||
|
return &Script{keyCount, src, hex.EncodeToString(h.Sum(nil))} |
||||
|
} |
||||
|
|
||||
|
func (s *Script) args(spec string, keysAndArgs []interface{}) []interface{} { |
||||
|
var args []interface{} |
||||
|
if s.keyCount < 0 { |
||||
|
args = make([]interface{}, 1+len(keysAndArgs)) |
||||
|
args[0] = spec |
||||
|
copy(args[1:], keysAndArgs) |
||||
|
} else { |
||||
|
args = make([]interface{}, 2+len(keysAndArgs)) |
||||
|
args[0] = spec |
||||
|
args[1] = s.keyCount |
||||
|
copy(args[2:], keysAndArgs) |
||||
|
} |
||||
|
return args |
||||
|
} |
||||
|
|
||||
|
// Do evaluates the script. Under the covers, Do optimistically evaluates the
|
||||
|
// script using the EVALSHA command. If the command fails because the script is
|
||||
|
// not loaded, then Do evaluates the script using the EVAL command (thus
|
||||
|
// causing the script to load).
|
||||
|
func (s *Script) Do(c Conn, keysAndArgs ...interface{}) (interface{}, error) {
	// Try the cheap path first: EVALSHA avoids resending the script source.
	v, err := c.Do("EVALSHA", s.args(s.hash, keysAndArgs)...)
	if e, ok := err.(Error); ok && strings.HasPrefix(string(e), "NOSCRIPT ") {
		// Server does not have the script cached; EVAL runs it and loads it.
		v, err = c.Do("EVAL", s.args(s.src, keysAndArgs)...)
	}
	return v, err
}
||||
|
|
||||
|
// SendHash evaluates the script without waiting for the reply. The script is
|
||||
|
// evaluated with the EVALSHA command. The application must ensure that the
|
||||
|
// script is loaded by a previous call to Send, Do or Load methods.
|
||||
|
func (s *Script) SendHash(c Conn, keysAndArgs ...interface{}) error { |
||||
|
return c.Send("EVALSHA", s.args(s.hash, keysAndArgs)...) |
||||
|
} |
||||
|
|
||||
|
// Send evaluates the script without waiting for the reply.
|
||||
|
func (s *Script) Send(c Conn, keysAndArgs ...interface{}) error { |
||||
|
return c.Send("EVAL", s.args(s.src, keysAndArgs)...) |
||||
|
} |
||||
|
|
||||
|
// Load loads the script without evaluating it.
|
||||
|
func (s *Script) Load(c Conn) error { |
||||
|
_, err := c.Do("SCRIPT", "LOAD", s.src) |
||||
|
return err |
||||
|
} |
@ -0,0 +1,100 @@ |
|||||
|
// Copyright 2012 Gary Burd
|
||||
|
//
|
||||
|
// Licensed under the Apache License, Version 2.0 (the "License"): you may
|
||||
|
// not use this file except in compliance with the License. You may obtain
|
||||
|
// a copy of the License at
|
||||
|
//
|
||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
//
|
||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||
|
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
|
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
|
// License for the specific language governing permissions and limitations
|
||||
|
// under the License.
|
||||
|
|
||||
|
package redis_test |
||||
|
|
||||
|
import ( |
||||
|
"fmt" |
||||
|
"reflect" |
||||
|
"testing" |
||||
|
"time" |
||||
|
|
||||
|
"github.com/garyburd/redigo/redis" |
||||
|
) |
||||
|
|
||||
|
var ( |
||||
|
// These variables are declared at package level to remove distracting
|
||||
|
// details from the examples.
|
||||
|
c redis.Conn |
||||
|
reply interface{} |
||||
|
err error |
||||
|
) |
||||
|
|
||||
|
func ExampleScript() {
	// Initialize a package-level variable with a script.
	var getScript = redis.NewScript(1, `return redis.call('get', KEYS[1])`)

	// In a function, use the script Do method to evaluate the script. The Do
	// method optimistically uses the EVALSHA command. If the script is not
	// loaded, then the Do method falls back to the EVAL command.
	// (reply, err and c are package-level variables declared above.)
	reply, err = getScript.Do(c, "foo")
}
||||
|
|
||||
|
// TestScript exercises Do (with its EVAL fallback), Load, SendHash and Send
// against a real test server.
func TestScript(t *testing.T) {
	c, err := redis.DialDefaultServer()
	if err != nil {
		t.Fatalf("error connection to database, %v", err)
	}
	defer c.Close()

	// To test fall back in Do, we make script unique by adding comment with current time.
	script := fmt.Sprintf("--%d\nreturn {KEYS[1],KEYS[2],ARGV[1],ARGV[2]}", time.Now().UnixNano())
	s := redis.NewScript(2, script)
	reply := []interface{}{[]byte("key1"), []byte("key2"), []byte("arg1"), []byte("arg2")}

	// First Do must take the EVAL fallback path (the script was never loaded).
	v, err := s.Do(c, "key1", "key2", "arg1", "arg2")
	if err != nil {
		t.Errorf("s.Do(c, ...) returned %v", err)
	}

	if !reflect.DeepEqual(v, reply) {
		t.Errorf("s.Do(c, ..); = %v, want %v", v, reply)
	}

	// Load caches the script so the EVALSHA issued by SendHash can find it.
	err = s.Load(c)
	if err != nil {
		t.Errorf("s.Load(c) returned %v", err)
	}

	err = s.SendHash(c, "key1", "key2", "arg1", "arg2")
	if err != nil {
		t.Errorf("s.SendHash(c, ...) returned %v", err)
	}

	err = c.Flush()
	if err != nil {
		t.Errorf("c.Flush() returned %v", err)
	}

	v, err = c.Receive()
	if !reflect.DeepEqual(v, reply) {
		t.Errorf("s.SendHash(c, ..); c.Receive() = %v, want %v", v, reply)
	}

	err = s.Send(c, "key1", "key2", "arg1", "arg2")
	if err != nil {
		t.Errorf("s.Send(c, ...) returned %v", err)
	}

	err = c.Flush()
	if err != nil {
		t.Errorf("c.Flush() returned %v", err)
	}

	v, err = c.Receive()
	if !reflect.DeepEqual(v, reply) {
		t.Errorf("s.Send(c, ..); c.Receive() = %v, want %v", v, reply)
	}

}
@ -0,0 +1,177 @@ |
|||||
|
// Copyright 2012 Gary Burd
|
||||
|
//
|
||||
|
// Licensed under the Apache License, Version 2.0 (the "License"): you may
|
||||
|
// not use this file except in compliance with the License. You may obtain
|
||||
|
// a copy of the License at
|
||||
|
//
|
||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
//
|
||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||
|
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
|
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
|
// License for the specific language governing permissions and limitations
|
||||
|
// under the License.
|
||||
|
|
||||
|
package redis |
||||
|
|
||||
|
import ( |
||||
|
"bufio" |
||||
|
"errors" |
||||
|
"flag" |
||||
|
"fmt" |
||||
|
"io" |
||||
|
"io/ioutil" |
||||
|
"os" |
||||
|
"os/exec" |
||||
|
"strconv" |
||||
|
"strings" |
||||
|
"sync" |
||||
|
"testing" |
||||
|
"time" |
||||
|
) |
||||
|
|
||||
|
// SetNowFunc replaces the package's time source so tests can control
// time-dependent behavior deterministically.
func SetNowFunc(f func() time.Time) {
	nowFunc = f
}
||||
|
|
||||
|
var ( |
||||
|
ErrNegativeInt = errNegativeInt |
||||
|
|
||||
|
serverPath = flag.String("redis-server", "redis-server", "Path to redis server binary") |
||||
|
serverBasePort = flag.Int("redis-port", 16379, "Beginning of port range for test servers") |
||||
|
serverLogName = flag.String("redis-log", "", "Write Redis server logs to `filename`") |
||||
|
serverLog = ioutil.Discard |
||||
|
|
||||
|
defaultServerMu sync.Mutex |
||||
|
defaultServer *Server |
||||
|
defaultServerErr error |
||||
|
) |
||||
|
|
||||
|
// Server represents a redis-server process started for testing.
type Server struct {
	name string        // label used in log output
	cmd  *exec.Cmd     // the running redis-server process
	done chan struct{} // closed by watch once the process has exited
}
||||
|
|
||||
|
// NewServer starts a redis-server process with the given arguments and waits
// (up to ten seconds) until it reports that it accepts connections.
func NewServer(name string, args ...string) (*Server, error) {
	s := &Server{
		name: name,
		cmd:  exec.Command(*serverPath, args...),
		done: make(chan struct{}),
	}

	// Capture stdout so watch can scan the server log for the "ready" line.
	r, err := s.cmd.StdoutPipe()
	if err != nil {
		return nil, err
	}

	err = s.cmd.Start()
	if err != nil {
		return nil, err
	}

	// Buffered so watch never blocks sending after we time out here.
	ready := make(chan error, 1)
	go s.watch(r, ready)

	select {
	case err = <-ready:
	case <-time.After(time.Second * 10):
		err = errors.New("timeout waiting for server to start")
	}

	if err != nil {
		s.Stop()
		return nil, err
	}

	return s, nil
}
||||
|
|
||||
|
// watch scans the server's stdout, mirroring each line to serverLog, and
// sends on ready exactly once: nil when the server reports it is listening,
// or an error if stdout closes first. It closes s.done after the process
// exits so Stop can wait on it.
func (s *Server) watch(r io.Reader, ready chan error) {
	fmt.Fprintf(serverLog, "%d START %s \n", s.cmd.Process.Pid, s.name)
	var listening bool
	var text string
	scn := bufio.NewScanner(r)
	for scn.Scan() {
		text = scn.Text()
		fmt.Fprintf(serverLog, "%s\n", text)
		if !listening {
			if strings.Contains(text, "The server is now ready to accept connections on port") {
				listening = true
				ready <- nil
			}
		}
	}
	if !listening {
		// Stdout closed without the ready line: report the last line seen.
		ready <- fmt.Errorf("server exited: %s", text)
	}
	s.cmd.Wait()
	fmt.Fprintf(serverLog, "%d STOP %s \n", s.cmd.Process.Pid, s.name)
	close(s.done)
}
||||
|
|
||||
|
// Stop asks the server process to shut down and waits until it has exited.
func (s *Server) Stop() {
	s.cmd.Process.Signal(os.Interrupt)
	<-s.done // closed by watch after cmd.Wait returns
}
||||
|
|
||||
|
// stopDefaultServer stops the server created by DialDefaultServer.
|
||||
|
func stopDefaultServer() { |
||||
|
defaultServerMu.Lock() |
||||
|
defer defaultServerMu.Unlock() |
||||
|
if defaultServer != nil { |
||||
|
defaultServer.Stop() |
||||
|
defaultServer = nil |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
// startDefaultServer starts the default server if not already running.
|
||||
|
func startDefaultServer() error {
	defaultServerMu.Lock()
	defer defaultServerMu.Unlock()
	// A previous failure is sticky: once defaultServerErr is set we do not
	// retry within this process.
	if defaultServer != nil || defaultServerErr != nil {
		return defaultServerErr
	}
	// Persistence is disabled so each test run starts from an empty, fast server.
	defaultServer, defaultServerErr = NewServer(
		"default",
		"--port", strconv.Itoa(*serverBasePort),
		"--save", "",
		"--appendonly", "no")
	return defaultServerErr
}
||||
|
|
||||
|
// DialDefaultServer starts the test server if not already started and dials a
|
||||
|
// connection to the server.
|
||||
|
func DialDefaultServer() (Conn, error) { |
||||
|
if err := startDefaultServer(); err != nil { |
||||
|
return nil, err |
||||
|
} |
||||
|
c, err := Dial("tcp", fmt.Sprintf(":%d", *serverBasePort), DialReadTimeout(1*time.Second), DialWriteTimeout(1*time.Second)) |
||||
|
if err != nil { |
||||
|
return nil, err |
||||
|
} |
||||
|
c.Do("FLUSHDB") |
||||
|
return c, nil |
||||
|
} |
||||
|
|
||||
|
// TestMain parses the test flags, optionally redirects server logs to a
// file, and guarantees the default test server is stopped before exit.
func TestMain(m *testing.M) {
	// The anonymous function lets the deferred cleanup run before os.Exit,
	// which would otherwise skip defers.
	os.Exit(func() int {
		flag.Parse()

		var f *os.File
		if *serverLogName != "" {
			var err error
			f, err = os.OpenFile(*serverLogName, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0600)
			if err != nil {
				fmt.Fprintf(os.Stderr, "Error opening redis-log: %v\n", err)
				return 1
			}
			defer f.Close()
			serverLog = f
		}

		defer stopDefaultServer()

		return m.Run()
	}())
}
@ -0,0 +1,113 @@ |
|||||
|
// Copyright 2013 Gary Burd
|
||||
|
//
|
||||
|
// Licensed under the Apache License, Version 2.0 (the "License"): you may
|
||||
|
// not use this file except in compliance with the License. You may obtain
|
||||
|
// a copy of the License at
|
||||
|
//
|
||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
//
|
||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||
|
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
|
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
|
// License for the specific language governing permissions and limitations
|
||||
|
// under the License.
|
||||
|
|
||||
|
package redis_test |
||||
|
|
||||
|
import ( |
||||
|
"fmt" |
||||
|
"github.com/garyburd/redigo/redis" |
||||
|
) |
||||
|
|
||||
|
// zpop pops a value from the ZSET key using WATCH/MULTI/EXEC commands.
|
||||
|
func zpop(c redis.Conn, key string) (result string, err error) {

	defer func() {
		// Return connection to normal state on error.
		if err != nil {
			c.Do("DISCARD")
		}
	}()

	// Loop until transaction is successful.
	for {
		// WATCH makes the EXEC below fail (return nil) if key is modified
		// by another client before the transaction runs.
		if _, err := c.Do("WATCH", key); err != nil {
			return "", err
		}

		members, err := redis.Strings(c.Do("ZRANGE", key, 0, 0))
		if err != nil {
			return "", err
		}
		if len(members) != 1 {
			return "", redis.ErrNil
		}

		c.Send("MULTI")
		c.Send("ZREM", key, members[0])
		queued, err := c.Do("EXEC")
		if err != nil {
			return "", err
		}

		// A nil EXEC reply means the watched key changed; retry the loop.
		if queued != nil {
			result = members[0]
			break
		}
	}

	return result, nil
}
||||
|
|
||||
|
// zpopScript pops a value from a ZSET.
|
||||
|
var zpopScript = redis.NewScript(1, `
local r = redis.call('ZRANGE', KEYS[1], 0, 0)
if r ~= nil then
r = r[1]
redis.call('ZREM', KEYS[1], r)
end
return r
`) // NOTE(review): ZRANGE appears to return a table here, so the nil check presumably relies on r[1] being nil for an empty set — confirm against Redis scripting semantics.
||||
|
|
||||
|
// This example implements ZPOP as described at
|
||||
|
// http://redis.io/topics/transactions using WATCH/MULTI/EXEC and scripting.
|
||||
|
func Example_zpop() {
	c, err := dial()
	if err != nil {
		fmt.Println(err)
		return
	}
	defer c.Close()

	// Add test data using a pipeline.

	for i, member := range []string{"red", "blue", "green"} {
		c.Send("ZADD", "zset", i, member)
	}
	// Do with an empty command name flushes the pipeline and collects replies.
	if _, err := c.Do(""); err != nil {
		fmt.Println(err)
		return
	}

	// Pop using WATCH/MULTI/EXEC

	v, err := zpop(c, "zset")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(v)

	// Pop using a script.

	v, err = redis.String(zpopScript.Do(c, "zset"))
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(v)

	// Output:
	// red
	// blue
}
@ -0,0 +1,15 @@ |
|||||
|
# This is the official list of Snappy-Go authors for copyright purposes. |
||||
|
# This file is distinct from the CONTRIBUTORS files. |
||||
|
# See the latter for an explanation. |
||||
|
|
||||
|
# Names should be added to this file as |
||||
|
# Name or Organization <email address> |
||||
|
# The email address is not required for organizations. |
||||
|
|
||||
|
# Please keep the list sorted. |
||||
|
|
||||
|
Damian Gryski <dgryski@gmail.com> |
||||
|
Google Inc. |
||||
|
Jan Mercl <0xjnml@gmail.com> |
||||
|
Rodolfo Carvalho <rhcarvalho@gmail.com> |
||||
|
Sebastien Binet <seb.binet@gmail.com> |
@ -0,0 +1,37 @@ |
|||||
|
# This is the official list of people who can contribute |
||||
|
# (and typically have contributed) code to the Snappy-Go repository. |
||||
|
# The AUTHORS file lists the copyright holders; this file |
||||
|
# lists people. For example, Google employees are listed here |
||||
|
# but not in AUTHORS, because Google holds the copyright. |
||||
|
# |
||||
|
# The submission process automatically checks to make sure |
||||
|
# that people submitting code are listed in this file (by email address). |
||||
|
# |
||||
|
# Names should be added to this file only after verifying that |
||||
|
# the individual or the individual's organization has agreed to |
||||
|
# the appropriate Contributor License Agreement, found here: |
||||
|
# |
||||
|
# http://code.google.com/legal/individual-cla-v1.0.html |
||||
|
# http://code.google.com/legal/corporate-cla-v1.0.html |
||||
|
# |
||||
|
# The agreement for individuals can be filled out on the web. |
||||
|
# |
||||
|
# When adding J Random Contributor's name to this file, |
||||
|
# either J's name or J's organization's name should be |
||||
|
# added to the AUTHORS file, depending on whether the |
||||
|
# individual or corporate CLA was used. |
||||
|
|
||||
|
# Names should be added to this file like so: |
||||
|
# Name <email address> |
||||
|
|
||||
|
# Please keep the list sorted. |
||||
|
|
||||
|
Damian Gryski <dgryski@gmail.com> |
||||
|
Jan Mercl <0xjnml@gmail.com> |
||||
|
Kai Backman <kaib@golang.org> |
||||
|
Marc-Antoine Ruel <maruel@chromium.org> |
||||
|
Nigel Tao <nigeltao@golang.org> |
||||
|
Rob Pike <r@golang.org> |
||||
|
Rodolfo Carvalho <rhcarvalho@gmail.com> |
||||
|
Russ Cox <rsc@golang.org> |
||||
|
Sebastien Binet <seb.binet@gmail.com> |
@ -0,0 +1,27 @@ |
|||||
|
Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. |
||||
|
|
||||
|
Redistribution and use in source and binary forms, with or without |
||||
|
modification, are permitted provided that the following conditions are |
||||
|
met: |
||||
|
|
||||
|
* Redistributions of source code must retain the above copyright |
||||
|
notice, this list of conditions and the following disclaimer. |
||||
|
* Redistributions in binary form must reproduce the above |
||||
|
copyright notice, this list of conditions and the following disclaimer |
||||
|
in the documentation and/or other materials provided with the |
||||
|
distribution. |
||||
|
* Neither the name of Google Inc. nor the names of its |
||||
|
contributors may be used to endorse or promote products derived from |
||||
|
this software without specific prior written permission. |
||||
|
|
||||
|
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
||||
|
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
||||
|
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
||||
|
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
||||
|
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
||||
|
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
||||
|
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
||||
|
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
||||
|
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
||||
|
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
||||
|
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
@ -0,0 +1,107 @@ |
|||||
|
The Snappy compression format in the Go programming language. |
||||
|
|
||||
|
To download and install from source: |
||||
|
$ go get github.com/golang/snappy |
||||
|
|
||||
|
Unless otherwise noted, the Snappy-Go source files are distributed |
||||
|
under the BSD-style license found in the LICENSE file. |
||||
|
|
||||
|
|
||||
|
|
||||
|
Benchmarks. |
||||
|
|
||||
|
The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten |
||||
|
or so files, the same set used by the C++ Snappy code (github.com/google/snappy |
||||
|
and note the "google", not "golang"). On an "Intel(R) Core(TM) i7-3770 CPU @ |
||||
|
3.40GHz", Go's GOARCH=amd64 numbers as of 2016-05-29: |
||||
|
|
||||
|
"go test -test.bench=." |
||||
|
|
||||
|
_UFlat0-8 2.19GB/s ± 0% html |
||||
|
_UFlat1-8 1.41GB/s ± 0% urls |
||||
|
_UFlat2-8 23.5GB/s ± 2% jpg |
||||
|
_UFlat3-8 1.91GB/s ± 0% jpg_200 |
||||
|
_UFlat4-8 14.0GB/s ± 1% pdf |
||||
|
_UFlat5-8 1.97GB/s ± 0% html4 |
||||
|
_UFlat6-8 814MB/s ± 0% txt1 |
||||
|
_UFlat7-8 785MB/s ± 0% txt2 |
||||
|
_UFlat8-8 857MB/s ± 0% txt3 |
||||
|
_UFlat9-8 719MB/s ± 1% txt4 |
||||
|
_UFlat10-8 2.84GB/s ± 0% pb |
||||
|
_UFlat11-8 1.05GB/s ± 0% gaviota |
||||
|
|
||||
|
_ZFlat0-8 1.04GB/s ± 0% html |
||||
|
_ZFlat1-8 534MB/s ± 0% urls |
||||
|
_ZFlat2-8 15.7GB/s ± 1% jpg |
||||
|
_ZFlat3-8 740MB/s ± 3% jpg_200 |
||||
|
_ZFlat4-8 9.20GB/s ± 1% pdf |
||||
|
_ZFlat5-8 991MB/s ± 0% html4 |
||||
|
_ZFlat6-8 379MB/s ± 0% txt1 |
||||
|
_ZFlat7-8 352MB/s ± 0% txt2 |
||||
|
_ZFlat8-8 396MB/s ± 1% txt3 |
||||
|
_ZFlat9-8 327MB/s ± 1% txt4 |
||||
|
_ZFlat10-8 1.33GB/s ± 1% pb |
||||
|
_ZFlat11-8 605MB/s ± 1% gaviota |
||||
|
|
||||
|
|
||||
|
|
||||
|
"go test -test.bench=. -tags=noasm" |
||||
|
|
||||
|
_UFlat0-8 621MB/s ± 2% html |
||||
|
_UFlat1-8 494MB/s ± 1% urls |
||||
|
_UFlat2-8 23.2GB/s ± 1% jpg |
||||
|
_UFlat3-8 1.12GB/s ± 1% jpg_200 |
||||
|
_UFlat4-8 4.35GB/s ± 1% pdf |
||||
|
_UFlat5-8 609MB/s ± 0% html4 |
||||
|
_UFlat6-8 296MB/s ± 0% txt1 |
||||
|
_UFlat7-8 288MB/s ± 0% txt2 |
||||
|
_UFlat8-8 309MB/s ± 1% txt3 |
||||
|
_UFlat9-8 280MB/s ± 1% txt4 |
||||
|
_UFlat10-8 753MB/s ± 0% pb |
||||
|
_UFlat11-8 400MB/s ± 0% gaviota |
||||
|
|
||||
|
_ZFlat0-8 409MB/s ± 1% html |
||||
|
_ZFlat1-8 250MB/s ± 1% urls |
||||
|
_ZFlat2-8 12.3GB/s ± 1% jpg |
||||
|
_ZFlat3-8 132MB/s ± 0% jpg_200 |
||||
|
_ZFlat4-8 2.92GB/s ± 0% pdf |
||||
|
_ZFlat5-8 405MB/s ± 1% html4 |
||||
|
_ZFlat6-8 179MB/s ± 1% txt1 |
||||
|
_ZFlat7-8 170MB/s ± 1% txt2 |
||||
|
_ZFlat8-8 189MB/s ± 1% txt3 |
||||
|
_ZFlat9-8 164MB/s ± 1% txt4 |
||||
|
_ZFlat10-8 479MB/s ± 1% pb |
||||
|
_ZFlat11-8 270MB/s ± 1% gaviota |
||||
|
|
||||
|
|
||||
|
|
||||
|
For comparison (Go's encoded output is byte-for-byte identical to C++'s), here |
||||
|
are the numbers from C++ Snappy's |
||||
|
|
||||
|
make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log |
||||
|
|
||||
|
BM_UFlat/0 2.4GB/s html |
||||
|
BM_UFlat/1 1.4GB/s urls |
||||
|
BM_UFlat/2 21.8GB/s jpg |
||||
|
BM_UFlat/3 1.5GB/s jpg_200 |
||||
|
BM_UFlat/4 13.3GB/s pdf |
||||
|
BM_UFlat/5 2.1GB/s html4 |
||||
|
BM_UFlat/6 1.0GB/s txt1 |
||||
|
BM_UFlat/7 959.4MB/s txt2 |
||||
|
BM_UFlat/8 1.0GB/s txt3 |
||||
|
BM_UFlat/9 864.5MB/s txt4 |
||||
|
BM_UFlat/10 2.9GB/s pb |
||||
|
BM_UFlat/11 1.2GB/s gaviota |
||||
|
|
||||
|
BM_ZFlat/0 944.3MB/s html (22.31 %) |
||||
|
BM_ZFlat/1 501.6MB/s urls (47.78 %) |
||||
|
BM_ZFlat/2 14.3GB/s jpg (99.95 %) |
||||
|
BM_ZFlat/3 538.3MB/s jpg_200 (73.00 %) |
||||
|
BM_ZFlat/4 8.3GB/s pdf (83.30 %) |
||||
|
BM_ZFlat/5 903.5MB/s html4 (22.52 %) |
||||
|
BM_ZFlat/6 336.0MB/s txt1 (57.88 %) |
||||
|
BM_ZFlat/7 312.3MB/s txt2 (61.91 %) |
||||
|
BM_ZFlat/8 353.1MB/s txt3 (54.99 %) |
||||
|
BM_ZFlat/9 289.9MB/s txt4 (66.26 %) |
||||
|
BM_ZFlat/10 1.2GB/s pb (19.68 %) |
||||
|
BM_ZFlat/11 527.4MB/s gaviota (37.72 %) |
@ -0,0 +1,77 @@ |
|||||
|
/*
|
||||
|
To build the snappytool binary: |
||||
|
g++ main.cpp /usr/lib/libsnappy.a -o snappytool |
||||
|
or, if you have built the C++ snappy library from source: |
||||
|
g++ main.cpp /path/to/your/snappy/.libs/libsnappy.a -o snappytool |
||||
|
after running "make" from your snappy checkout directory. |
||||
|
*/ |
||||
|
|
||||
|
#include <errno.h>
|
||||
|
#include <stdio.h>
|
||||
|
#include <string.h>
|
||||
|
#include <unistd.h>
|
||||
|
|
||||
|
#include "snappy.h"
|
||||
|
|
||||
|
#define N 1000000  // size of the fixed input/output buffers, in bytes

char dst[N];  // encode/decode output buffer
char src[N];  // holds all of stdin
||||
|
|
||||
|
// Reads all of stdin, Snappy-encodes (-e) or Snappy-decodes (-d) it, and
// writes the result to stdout. Returns non-zero on any error.
int main(int argc, char** argv) {
  // Parse args.
  if (argc != 2) {
    fprintf(stderr, "exactly one of -d or -e must be given\n");
    return 1;
  }
  bool decode = strcmp(argv[1], "-d") == 0;
  bool encode = strcmp(argv[1], "-e") == 0;
  if (decode == encode) {
    fprintf(stderr, "exactly one of -d or -e must be given\n");
    return 1;
  }

  // Read all of stdin into src[:s].
  size_t s = 0;
  while (1) {
    if (s == N) {
      fprintf(stderr, "input too large\n");
      return 1;
    }
    ssize_t n = read(0, src+s, N-s);
    if (n == 0) {
      break;  // EOF
    }
    if (n < 0) {
      fprintf(stderr, "read error: %s\n", strerror(errno));
      // TODO: handle EAGAIN, EINTR?
      return 1;
    }
    s += n;
  }

  // Encode or decode src[:s] to dst[:d], and write to stdout.
  size_t d = 0;
  if (encode) {
    if (N < snappy::MaxCompressedLength(s)) {
      fprintf(stderr, "input too large after encoding\n");
      return 1;
    }
    snappy::RawCompress(src, s, dst, &d);
  } else {
    if (!snappy::GetUncompressedLength(src, s, &d)) {
      fprintf(stderr, "could not get uncompressed length\n");
      return 1;
    }
    if (N < d) {
      fprintf(stderr, "input too large after decoding\n");
      return 1;
    }
    if (!snappy::RawUncompress(src, s, dst)) {
      fprintf(stderr, "input was not valid Snappy-compressed data\n");
      return 1;
    }
  }
  write(1, dst, d);
  return 0;
}
@ -0,0 +1,237 @@ |
|||||
|
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
|
||||
|
// Use of this source code is governed by a BSD-style
|
||||
|
// license that can be found in the LICENSE file.
|
||||
|
|
||||
|
package snappy |
||||
|
|
||||
|
import ( |
||||
|
"encoding/binary" |
||||
|
"errors" |
||||
|
"io" |
||||
|
) |
||||
|
|
||||
|
var ( |
||||
|
// ErrCorrupt reports that the input is invalid.
|
||||
|
ErrCorrupt = errors.New("snappy: corrupt input") |
||||
|
// ErrTooLarge reports that the uncompressed length is too large.
|
||||
|
ErrTooLarge = errors.New("snappy: decoded block is too large") |
||||
|
// ErrUnsupported reports that the input isn't supported.
|
||||
|
ErrUnsupported = errors.New("snappy: unsupported input") |
||||
|
|
||||
|
errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") |
||||
|
) |
||||
|
|
||||
|
// DecodedLen returns the length of the decoded block.
|
||||
|
func DecodedLen(src []byte) (int, error) { |
||||
|
v, _, err := decodedLen(src) |
||||
|
return v, err |
||||
|
} |
||||
|
|
||||
|
// decodedLen returns the length of the decoded block and the number of bytes
|
||||
|
// that the length header occupied.
|
||||
|
func decodedLen(src []byte) (blockLen, headerLen int, err error) { |
||||
|
v, n := binary.Uvarint(src) |
||||
|
if n <= 0 || v > 0xffffffff { |
||||
|
return 0, 0, ErrCorrupt |
||||
|
} |
||||
|
|
||||
|
const wordSize = 32 << (^uint(0) >> 32 & 1) |
||||
|
if wordSize == 32 && v > 0x7fffffff { |
||||
|
return 0, 0, ErrTooLarge |
||||
|
} |
||||
|
return int(v), n, nil |
||||
|
} |
||||
|
|
||||
|
// Status codes returned by the low-level decode function; 0 means success.
const (
	decodeErrCodeCorrupt                  = 1
	decodeErrCodeUnsupportedLiteralLength = 2
)
||||
|
|
||||
|
// Decode returns the decoded form of src. The returned slice may be a sub-
|
||||
|
// slice of dst if dst was large enough to hold the entire decoded block.
|
||||
|
// Otherwise, a newly allocated slice will be returned.
|
||||
|
//
|
||||
|
// The dst and src must not overlap. It is valid to pass a nil dst.
|
||||
|
func Decode(dst, src []byte) ([]byte, error) {
	dLen, s, err := decodedLen(src)
	if err != nil {
		return nil, err
	}
	// Reuse dst when it can hold the whole decoded block; otherwise allocate.
	if dLen <= len(dst) {
		dst = dst[:dLen]
	} else {
		dst = make([]byte, dLen)
	}
	// decode reports an integer status code rather than an error value; its
	// implementation is selected per platform (see decode_amd64.go).
	switch decode(dst, src[s:]) {
	case 0:
		return dst, nil
	case decodeErrCodeUnsupportedLiteralLength:
		return nil, errUnsupportedLiteralLength
	}
	return nil, ErrCorrupt
}
||||
|
|
||||
|
// NewReader returns a new Reader that decompresses from r, using the framing
|
||||
|
// format described at
|
||||
|
// https://github.com/google/snappy/blob/master/framing_format.txt
|
||||
|
func NewReader(r io.Reader) *Reader {
	return &Reader{
		r:       r,
		decoded: make([]byte, maxBlockSize),
		// buf must fit the largest possible chunk: a maximally-encoded block
		// plus its checksum.
		buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize),
	}
}
||||
|
|
||||
|
// Reader is an io.Reader that can read Snappy-compressed bytes.
|
||||
|
type Reader struct {
	r       io.Reader
	err     error  // sticky; once set, every Read returns it
	decoded []byte // scratch space holding one uncompressed block
	buf     []byte // scratch space for a raw chunk read from r
	// decoded[i:j] contains decoded bytes that have not yet been passed on.
	i, j       int
	readHeader bool // whether the stream identifier chunk has been seen
}
||||
|
|
||||
|
// Reset discards any buffered data, resets all state, and switches the Snappy
|
||||
|
// reader to read from r. This permits reusing a Reader rather than allocating
|
||||
|
// a new one.
|
||||
|
func (r *Reader) Reset(reader io.Reader) { |
||||
|
r.r = reader |
||||
|
r.err = nil |
||||
|
r.i = 0 |
||||
|
r.j = 0 |
||||
|
r.readHeader = false |
||||
|
} |
||||
|
|
||||
|
// readFull reads exactly len(p) bytes into p. On failure it records a sticky
// error on r and returns false. EOF is translated to ErrCorrupt unless
// allowEOF is true (EOF is only legal at a chunk boundary).
func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) {
	if _, r.err = io.ReadFull(r.r, p); r.err != nil {
		if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) {
			r.err = ErrCorrupt
		}
		return false
	}
	return true
}
||||
|
|
||||
|
// Read satisfies the io.Reader interface.
|
||||
|
func (r *Reader) Read(p []byte) (int, error) {
	if r.err != nil {
		return 0, r.err
	}
	for {
		// Serve any decoded bytes not yet handed to the caller.
		if r.i < r.j {
			n := copy(p, r.decoded[r.i:r.j])
			r.i += n
			return n, nil
		}
		// Read the 4-byte chunk header: 1 type byte, 3-byte little-endian length.
		if !r.readFull(r.buf[:4], true) {
			return 0, r.err
		}
		chunkType := r.buf[0]
		if !r.readHeader {
			// The stream must begin with a stream identifier chunk.
			if chunkType != chunkTypeStreamIdentifier {
				r.err = ErrCorrupt
				return 0, r.err
			}
			r.readHeader = true
		}
		chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
		if chunkLen > len(r.buf) {
			r.err = ErrUnsupported
			return 0, r.err
		}

		// The chunk types are specified at
		// https://github.com/google/snappy/blob/master/framing_format.txt
		switch chunkType {
		case chunkTypeCompressedData:
			// Section 4.2. Compressed data (chunk type 0x00).
			if chunkLen < checksumSize {
				r.err = ErrCorrupt
				return 0, r.err
			}
			buf := r.buf[:chunkLen]
			if !r.readFull(buf, false) {
				return 0, r.err
			}
			checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
			buf = buf[checksumSize:]

			n, err := DecodedLen(buf)
			if err != nil {
				r.err = err
				return 0, r.err
			}
			if n > len(r.decoded) {
				r.err = ErrCorrupt
				return 0, r.err
			}
			if _, err := Decode(r.decoded, buf); err != nil {
				r.err = err
				return 0, r.err
			}
			// The checksum covers the decoded bytes, not the compressed ones.
			if crc(r.decoded[:n]) != checksum {
				r.err = ErrCorrupt
				return 0, r.err
			}
			r.i, r.j = 0, n
			continue

		case chunkTypeUncompressedData:
			// Section 4.3. Uncompressed data (chunk type 0x01).
			if chunkLen < checksumSize {
				r.err = ErrCorrupt
				return 0, r.err
			}
			buf := r.buf[:checksumSize]
			if !r.readFull(buf, false) {
				return 0, r.err
			}
			checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
			// Read directly into r.decoded instead of via r.buf.
			n := chunkLen - checksumSize
			if n > len(r.decoded) {
				r.err = ErrCorrupt
				return 0, r.err
			}
			if !r.readFull(r.decoded[:n], false) {
				return 0, r.err
			}
			if crc(r.decoded[:n]) != checksum {
				r.err = ErrCorrupt
				return 0, r.err
			}
			r.i, r.j = 0, n
			continue

		case chunkTypeStreamIdentifier:
			// Section 4.1. Stream identifier (chunk type 0xff).
			if chunkLen != len(magicBody) {
				r.err = ErrCorrupt
				return 0, r.err
			}
			if !r.readFull(r.buf[:len(magicBody)], false) {
				return 0, r.err
			}
			for i := 0; i < len(magicBody); i++ {
				if r.buf[i] != magicBody[i] {
					r.err = ErrCorrupt
					return 0, r.err
				}
			}
			continue
		}

		if chunkType <= 0x7f {
			// Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
			r.err = ErrUnsupported
			return 0, r.err
		}
		// Section 4.4 Padding (chunk type 0xfe).
		// Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
		if !r.readFull(r.buf[:chunkLen], false) {
			return 0, r.err
		}
	}
}
@ -0,0 +1,14 @@ |
|||||
|
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !appengine
// +build gc
// +build !noasm

package snappy

// decode has the same semantics as in decode_other.go.
//
// The implementation is in assembly, in decode_amd64.s.
//
//go:noescape
func decode(dst, src []byte) int
@ -0,0 +1,490 @@ |
|||||
|
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !appengine
// +build gc
// +build !noasm

#include "textflag.h"

// The asm code generally follows the pure Go code in decode_other.go, except
// where marked with a "!!!".

// func decode(dst, src []byte) int
//
// All local variables fit into registers. The non-zero stack size is only to
// spill registers and push args when issuing a CALL. The register allocation:
//	- AX	scratch
//	- BX	scratch
//	- CX	length or x
//	- DX	offset
//	- SI	&src[s]
//	- DI	&dst[d]
//	+ R8	dst_base
//	+ R9	dst_len
//	+ R10	dst_base + dst_len
//	+ R11	src_base
//	+ R12	src_len
//	+ R13	src_base + src_len
//	- R14	used by doCopy
//	- R15	used by doCopy
//
// The registers R8-R13 (marked with a "+") are set at the start of the
// function, and after a CALL returns, and are not otherwise modified.
//
// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI.
// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI.
TEXT ·decode(SB), NOSPLIT, $48-56
	// Initialize SI, DI and R8-R13.
	MOVQ dst_base+0(FP), R8
	MOVQ dst_len+8(FP), R9
	MOVQ R8, DI
	MOVQ R8, R10
	ADDQ R9, R10
	MOVQ src_base+24(FP), R11
	MOVQ src_len+32(FP), R12
	MOVQ R11, SI
	MOVQ R11, R13
	ADDQ R12, R13

loop:
	// for s < len(src)
	CMPQ SI, R13
	JEQ  end

	// CX = uint32(src[s])
	//
	// switch src[s] & 0x03
	MOVBLZX (SI), CX
	MOVL    CX, BX
	ANDL    $3, BX
	CMPL    BX, $1
	JAE     tagCopy

	// ----------------------------------------
	// The code below handles literal tags.

	// case tagLiteral:
	// x := uint32(src[s] >> 2)
	// switch
	SHRL $2, CX
	CMPL CX, $60
	JAE  tagLit60Plus

	// case x < 60:
	// s++
	INCQ SI

doLit:
	// This is the end of the inner "switch", when we have a literal tag.
	//
	// We assume that CX == x and x fits in a uint32, where x is the variable
	// used in the pure Go decode_other.go code.

	// length = int(x) + 1
	//
	// Unlike the pure Go code, we don't need to check if length <= 0 because
	// CX can hold 64 bits, so the increment cannot overflow.
	INCQ CX

	// Prepare to check if copying length bytes will run past the end of dst or
	// src.
	//
	// AX = len(dst) - d
	// BX = len(src) - s
	MOVQ R10, AX
	SUBQ DI, AX
	MOVQ R13, BX
	SUBQ SI, BX

	// !!! Try a faster technique for short (16 or fewer bytes) copies.
	//
	// if length > 16 || len(dst)-d < 16 || len(src)-s < 16 {
	//   goto callMemmove // Fall back on calling runtime·memmove.
	// }
	//
	// The C++ snappy code calls this TryFastAppend. It also checks len(src)-s
	// against 21 instead of 16, because it cannot assume that all of its input
	// is contiguous in memory and so it needs to leave enough source bytes to
	// read the next tag without refilling buffers, but Go's Decode assumes
	// contiguousness (the src argument is a []byte).
	CMPQ CX, $16
	JGT  callMemmove
	CMPQ AX, $16
	JLT  callMemmove
	CMPQ BX, $16
	JLT  callMemmove

	// !!! Implement the copy from src to dst as a 16-byte load and store.
	// (Decode's documentation says that dst and src must not overlap.)
	//
	// This always copies 16 bytes, instead of only length bytes, but that's
	// OK. If the input is a valid Snappy encoding then subsequent iterations
	// will fix up the overrun. Otherwise, Decode returns a nil []byte (and a
	// non-nil error), so the overrun will be ignored.
	//
	// Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
	// 16-byte loads and stores. This technique probably wouldn't be as
	// effective on architectures that are fussier about alignment.
	MOVOU 0(SI), X0
	MOVOU X0, 0(DI)

	// d += length
	// s += length
	ADDQ CX, DI
	ADDQ CX, SI
	JMP  loop

callMemmove:
	// if length > len(dst)-d || length > len(src)-s { etc }
	CMPQ CX, AX
	JGT  errCorrupt
	CMPQ CX, BX
	JGT  errCorrupt

	// copy(dst[d:], src[s:s+length])
	//
	// This means calling runtime·memmove(&dst[d], &src[s], length), so we push
	// DI, SI and CX as arguments. Coincidentally, we also need to spill those
	// three registers to the stack, to save local variables across the CALL.
	MOVQ DI, 0(SP)
	MOVQ SI, 8(SP)
	MOVQ CX, 16(SP)
	MOVQ DI, 24(SP)
	MOVQ SI, 32(SP)
	MOVQ CX, 40(SP)
	CALL runtime·memmove(SB)

	// Restore local variables: unspill registers from the stack and
	// re-calculate R8-R13.
	MOVQ 24(SP), DI
	MOVQ 32(SP), SI
	MOVQ 40(SP), CX
	MOVQ dst_base+0(FP), R8
	MOVQ dst_len+8(FP), R9
	MOVQ R8, R10
	ADDQ R9, R10
	MOVQ src_base+24(FP), R11
	MOVQ src_len+32(FP), R12
	MOVQ R11, R13
	ADDQ R12, R13

	// d += length
	// s += length
	ADDQ CX, DI
	ADDQ CX, SI
	JMP  loop

tagLit60Plus:
	// !!! This fragment does the
	//
	// s += x - 58; if uint(s) > uint(len(src)) { etc }
	//
	// checks. In the asm version, we code it once instead of once per switch case.
	ADDQ CX, SI
	SUBQ $58, SI
	MOVQ SI, BX
	SUBQ R11, BX
	CMPQ BX, R12
	JA   errCorrupt

	// case x == 60:
	CMPL CX, $61
	JEQ  tagLit61
	JA   tagLit62Plus

	// x = uint32(src[s-1])
	MOVBLZX -1(SI), CX
	JMP     doLit

tagLit61:
	// case x == 61:
	// x = uint32(src[s-2]) | uint32(src[s-1])<<8
	MOVWLZX -2(SI), CX
	JMP     doLit

tagLit62Plus:
	CMPL CX, $62
	JA   tagLit63

	// case x == 62:
	// x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
	MOVWLZX -3(SI), CX
	MOVBLZX -1(SI), BX
	SHLL    $16, BX
	ORL     BX, CX
	JMP     doLit

tagLit63:
	// case x == 63:
	// x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
	MOVL -4(SI), CX
	JMP  doLit

// The code above handles literal tags.
// ----------------------------------------
// The code below handles copy tags.

tagCopy4:
	// case tagCopy4:
	// s += 5
	ADDQ $5, SI

	// if uint(s) > uint(len(src)) { etc }
	MOVQ SI, BX
	SUBQ R11, BX
	CMPQ BX, R12
	JA   errCorrupt

	// length = 1 + int(src[s-5])>>2
	SHRQ $2, CX
	INCQ CX

	// offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
	MOVLQZX -4(SI), DX
	JMP     doCopy

tagCopy2:
	// case tagCopy2:
	// s += 3
	ADDQ $3, SI

	// if uint(s) > uint(len(src)) { etc }
	MOVQ SI, BX
	SUBQ R11, BX
	CMPQ BX, R12
	JA   errCorrupt

	// length = 1 + int(src[s-3])>>2
	SHRQ $2, CX
	INCQ CX

	// offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
	MOVWQZX -2(SI), DX
	JMP     doCopy

tagCopy:
	// We have a copy tag. We assume that:
	//	- BX == src[s] & 0x03
	//	- CX == src[s]
	CMPQ BX, $2
	JEQ  tagCopy2
	JA   tagCopy4

	// case tagCopy1:
	// s += 2
	ADDQ $2, SI

	// if uint(s) > uint(len(src)) { etc }
	MOVQ SI, BX
	SUBQ R11, BX
	CMPQ BX, R12
	JA   errCorrupt

	// offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
	MOVQ    CX, DX
	ANDQ    $0xe0, DX
	SHLQ    $3, DX
	MOVBQZX -1(SI), BX
	ORQ     BX, DX

	// length = 4 + int(src[s-2])>>2&0x7
	SHRQ $2, CX
	ANDQ $7, CX
	ADDQ $4, CX

doCopy:
	// This is the end of the outer "switch", when we have a copy tag.
	//
	// We assume that:
	//	- CX == length && CX > 0
	//	- DX == offset

	// if offset <= 0 { etc }
	CMPQ DX, $0
	JLE  errCorrupt

	// if d < offset { etc }
	MOVQ DI, BX
	SUBQ R8, BX
	CMPQ BX, DX
	JLT  errCorrupt

	// if length > len(dst)-d { etc }
	MOVQ R10, BX
	SUBQ DI, BX
	CMPQ CX, BX
	JGT  errCorrupt

	// forwardCopy(dst[d:d+length], dst[d-offset:]); d += length
	//
	// Set:
	//	- R14 = len(dst)-d
	//	- R15 = &dst[d-offset]
	MOVQ R10, R14
	SUBQ DI, R14
	MOVQ DI, R15
	SUBQ DX, R15

	// !!! Try a faster technique for short (16 or fewer bytes) forward copies.
	//
	// First, try using two 8-byte load/stores, similar to the doLit technique
	// above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is
	// still OK if offset >= 8. Note that this has to be two 8-byte load/stores
	// and not one 16-byte load/store, and the first store has to be before the
	// second load, due to the overlap if offset is in the range [8, 16).
	//
	// if length > 16 || offset < 8 || len(dst)-d < 16 {
	//   goto slowForwardCopy
	// }
	// copy 16 bytes
	// d += length
	CMPQ CX, $16
	JGT  slowForwardCopy
	CMPQ DX, $8
	JLT  slowForwardCopy
	CMPQ R14, $16
	JLT  slowForwardCopy
	MOVQ 0(R15), AX
	MOVQ AX, 0(DI)
	MOVQ 8(R15), BX
	MOVQ BX, 8(DI)
	ADDQ CX, DI
	JMP  loop

slowForwardCopy:
	// !!! If the forward copy is longer than 16 bytes, or if offset < 8, we
	// can still try 8-byte load stores, provided we can overrun up to 10 extra
	// bytes. As above, the overrun will be fixed up by subsequent iterations
	// of the outermost loop.
	//
	// The C++ snappy code calls this technique IncrementalCopyFastPath. Its
	// commentary says:
	//
	// ----
	//
	// The main part of this loop is a simple copy of eight bytes at a time
	// until we've copied (at least) the requested amount of bytes. However,
	// if d and d-offset are less than eight bytes apart (indicating a
	// repeating pattern of length < 8), we first need to expand the pattern in
	// order to get the correct results. For instance, if the buffer looks like
	// this, with the eight-byte <d-offset> and <d> patterns marked as
	// intervals:
	//
	//	abxxxxxxxxxxxx
	//	[------]           d-offset
	//	  [------]         d
	//
	// a single eight-byte copy from <d-offset> to <d> will repeat the pattern
	// once, after which we can move <d> two bytes without moving <d-offset>:
	//
	//	ababxxxxxxxxxx
	//	[------]           d-offset
	//	    [------]       d
	//
	// and repeat the exercise until the two no longer overlap.
	//
	// This allows us to do very well in the special case of one single byte
	// repeated many times, without taking a big hit for more general cases.
	//
	// The worst case of extra writing past the end of the match occurs when
	// offset == 1 and length == 1; the last copy will read from byte positions
	// [0..7] and write to [4..11], whereas it was only supposed to write to
	// position 1. Thus, ten excess bytes.
	//
	// ----
	//
	// That "10 byte overrun" worst case is confirmed by Go's
	// TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
	// and finishSlowForwardCopy algorithm.
	//
	// if length > len(dst)-d-10 {
	//   goto verySlowForwardCopy
	// }
	SUBQ $10, R14
	CMPQ CX, R14
	JGT  verySlowForwardCopy

makeOffsetAtLeast8:
	// !!! As above, expand the pattern so that offset >= 8 and we can use
	// 8-byte load/stores.
	//
	// for offset < 8 {
	//   copy 8 bytes from dst[d-offset:] to dst[d:]
	//   length -= offset
	//   d      += offset
	//   offset += offset
	//   // The two previous lines together means that d-offset, and therefore
	//   // R15, is unchanged.
	// }
	CMPQ DX, $8
	JGE  fixUpSlowForwardCopy
	MOVQ (R15), BX
	MOVQ BX, (DI)
	SUBQ DX, CX
	ADDQ DX, DI
	ADDQ DX, DX
	JMP  makeOffsetAtLeast8

fixUpSlowForwardCopy:
	// !!! Add length (which might be negative now) to d (implied by DI being
	// &dst[d]) so that d ends up at the right place when we jump back to the
	// top of the loop. Before we do that, though, we save DI to AX so that, if
	// length is positive, copying the remaining length bytes will write to the
	// right place.
	MOVQ DI, AX
	ADDQ CX, DI

finishSlowForwardCopy:
	// !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative
	// length means that we overrun, but as above, that will be fixed up by
	// subsequent iterations of the outermost loop.
	CMPQ CX, $0
	JLE  loop
	MOVQ (R15), BX
	MOVQ BX, (AX)
	ADDQ $8, R15
	ADDQ $8, AX
	SUBQ $8, CX
	JMP  finishSlowForwardCopy

verySlowForwardCopy:
	// verySlowForwardCopy is a simple implementation of forward copy. In C
	// parlance, this is a do/while loop instead of a while loop, since we know
	// that length > 0. In Go syntax:
	//
	// for {
	//   dst[d] = dst[d - offset]
	//   d++
	//   length--
	//   if length == 0 {
	//     break
	//   }
	// }
	MOVB (R15), BX
	MOVB BX, (DI)
	INCQ R15
	INCQ DI
	DECQ CX
	JNZ  verySlowForwardCopy
	JMP  loop

// The code above handles copy tags.
// ----------------------------------------

end:
	// This is the end of the "for s < len(src)".
	//
	// if d != len(dst) { etc }
	CMPQ DI, R10
	JNE  errCorrupt

	// return 0
	MOVQ $0, ret+48(FP)
	RET

errCorrupt:
	// return decodeErrCodeCorrupt
	MOVQ $1, ret+48(FP)
	RET
@ -0,0 +1,101 @@ |
|||||
|
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
|
||||
|
// Use of this source code is governed by a BSD-style
|
||||
|
// license that can be found in the LICENSE file.
|
||||
|
|
||||
|
// +build !amd64 appengine !gc noasm
|
||||
|
|
||||
|
package snappy |
||||
|
|
||||
|
// decode writes the decoding of src to dst. It assumes that the varint-encoded
// length of the decompressed bytes has already been read, and that len(dst)
// equals that length.
//
// It returns 0 on success or a decodeErrCodeXxx error code on failure.
func decode(dst, src []byte) int {
	// d and s index dst and src respectively; offset and length describe the
	// copy operation decoded from the current tag.
	var d, s, offset, length int
	for s < len(src) {
		// The low two bits of each tag byte select literal vs. one of the
		// three copy encodings.
		switch src[s] & 0x03 {
		case tagLiteral:
			x := uint32(src[s] >> 2)
			// Literal lengths 60-63 mean the real length is stored in the
			// following 1-4 bytes, little-endian.
			switch {
			case x < 60:
				s++
			case x == 60:
				s += 2
				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
					return decodeErrCodeCorrupt
				}
				x = uint32(src[s-1])
			case x == 61:
				s += 3
				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
					return decodeErrCodeCorrupt
				}
				x = uint32(src[s-2]) | uint32(src[s-1])<<8
			case x == 62:
				s += 4
				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
					return decodeErrCodeCorrupt
				}
				x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
			case x == 63:
				s += 5
				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
					return decodeErrCodeCorrupt
				}
				x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
			}
			length = int(x) + 1
			// int(x) + 1 can only be non-positive if the conversion wrapped,
			// which presumably can happen only where int is 32 bits — TODO confirm.
			if length <= 0 {
				return decodeErrCodeUnsupportedLiteralLength
			}
			if length > len(dst)-d || length > len(src)-s {
				return decodeErrCodeCorrupt
			}
			copy(dst[d:], src[s:s+length])
			d += length
			s += length
			continue

		case tagCopy1:
			s += 2
			if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
				return decodeErrCodeCorrupt
			}
			length = 4 + int(src[s-2])>>2&0x7
			offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))

		case tagCopy2:
			s += 3
			if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
				return decodeErrCodeCorrupt
			}
			length = 1 + int(src[s-3])>>2
			offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)

		case tagCopy4:
			s += 5
			if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
				return decodeErrCodeCorrupt
			}
			length = 1 + int(src[s-5])>>2
			offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
		}

		// A copy must reach backwards into already-decoded output and must
		// fit in the remaining destination space.
		if offset <= 0 || d < offset || length > len(dst)-d {
			return decodeErrCodeCorrupt
		}
		// Copy from an earlier sub-slice of dst to a later sub-slice. Unlike
		// the built-in copy function, this byte-by-byte copy always runs
		// forwards, even if the slices overlap. Conceptually, this is:
		//
		// d += forwardCopy(dst[d:d+length], dst[d-offset:])
		for end := d + length; d != end; d++ {
			dst[d] = dst[d-offset]
		}
	}
	if d != len(dst) {
		return decodeErrCodeCorrupt
	}
	return 0
}
@ -0,0 +1,285 @@ |
|||||
|
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
|
||||
|
// Use of this source code is governed by a BSD-style
|
||||
|
// license that can be found in the LICENSE file.
|
||||
|
|
||||
|
package snappy |
||||
|
|
||||
|
import ( |
||||
|
"encoding/binary" |
||||
|
"errors" |
||||
|
"io" |
||||
|
) |
||||
|
|
||||
|
// Encode returns the encoded form of src. The returned slice may be a sub-
|
||||
|
// slice of dst if dst was large enough to hold the entire encoded block.
|
||||
|
// Otherwise, a newly allocated slice will be returned.
|
||||
|
//
|
||||
|
// The dst and src must not overlap. It is valid to pass a nil dst.
|
||||
|
func Encode(dst, src []byte) []byte { |
||||
|
if n := MaxEncodedLen(len(src)); n < 0 { |
||||
|
panic(ErrTooLarge) |
||||
|
} else if len(dst) < n { |
||||
|
dst = make([]byte, n) |
||||
|
} |
||||
|
|
||||
|
// The block starts with the varint-encoded length of the decompressed bytes.
|
||||
|
d := binary.PutUvarint(dst, uint64(len(src))) |
||||
|
|
||||
|
for len(src) > 0 { |
||||
|
p := src |
||||
|
src = nil |
||||
|
if len(p) > maxBlockSize { |
||||
|
p, src = p[:maxBlockSize], p[maxBlockSize:] |
||||
|
} |
||||
|
if len(p) < minNonLiteralBlockSize { |
||||
|
d += emitLiteral(dst[d:], p) |
||||
|
} else { |
||||
|
d += encodeBlock(dst[d:], p) |
||||
|
} |
||||
|
} |
||||
|
return dst[:d] |
||||
|
} |
||||
|
|
||||
|
// inputMargin is the minimum number of extra input bytes to keep, inside
// encodeBlock's inner loop. On some architectures, this margin lets us
// implement a fast path for emitLiteral, where the copy of short (<= 16 byte)
// literals can be implemented as a single load to and store from a 16-byte
// register. That literal's actual length can be as short as 1 byte, so this
// can copy up to 15 bytes too much, but that's OK as subsequent iterations of
// the encoding loop will fix up the copy overrun, and this inputMargin ensures
// that we don't overrun the dst and src buffers.
const inputMargin = 16 - 1

// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that
// could be encoded with a copy tag. This is the minimum with respect to the
// algorithm used by encodeBlock, not a minimum enforced by the file format.
//
// The encoded output must start with at least a 1 byte literal, as there are
// no previous bytes to copy. A minimal (1 byte) copy after that, generated
// from an emitCopy call in encodeBlock's main loop, would require at least
// another inputMargin bytes, for the reason above: we want any emitLiteral
// calls inside encodeBlock's main loop to use the fast path if possible, which
// requires being able to overrun by inputMargin bytes. Thus,
// minNonLiteralBlockSize equals 1 + 1 + inputMargin.
//
// The C++ code doesn't use this exact threshold, but it could, as discussed at
// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion
// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an
// optimization. It should not affect the encoded form. This is tested by
// TestSameEncodingAsCppShortCopies.
const minNonLiteralBlockSize = 1 + 1 + inputMargin
||||
|
|
||||
|
// MaxEncodedLen returns the maximum length of a snappy block, given its
// uncompressed length.
//
// It will return a negative value if srcLen is too large to encode.
func MaxEncodedLen(srcLen int) int {
	const maxUint32 = 0xffffffff
	uncompressed := uint64(srcLen)
	if uncompressed > maxUint32 {
		return -1
	}
	// Compressed data can be defined as:
	//	compressed := item* literal*
	//	item       := literal* copy
	//
	// The trailing literal sequence has a space blowup of at most 62/60
	// since a literal of length 60 needs one tag byte + one extra byte
	// for length information.
	//
	// Item blowup is trickier to measure. Suppose the "copy" op copies
	// 4 bytes of data. Because of a special check in the encoding code,
	// we produce a 4-byte copy only if the offset is < 65536. Therefore
	// the copy op takes 3 bytes to encode, and this type of item leads
	// to at most the 62/60 blowup for representing literals.
	//
	// Suppose the "copy" op copies 5 bytes of data. If the offset is big
	// enough, it will take 5 bytes to encode the copy op. Therefore the
	// worst case here is a one-byte literal followed by a five-byte copy.
	// That is, 6 bytes of input turn into 7 bytes of "compressed" data.
	//
	// This last factor dominates the blowup, so the final estimate is:
	compressed := 32 + uncompressed + uncompressed/6
	if compressed > maxUint32 {
		return -1
	}
	return int(compressed)
}
||||
|
|
||||
|
// errClosed is the sticky error reported by Write, Flush and Close after
// Close has been called.
var errClosed = errors.New("snappy: Writer is closed")

// NewWriter returns a new Writer that compresses to w.
//
// The Writer returned does not buffer writes. There is no need to Flush or
// Close such a Writer.
//
// Deprecated: the Writer returned is not suitable for many small writes, only
// for few large writes. Use NewBufferedWriter instead, which is efficient
// regardless of the frequency and shape of the writes, and remember to Close
// that Writer when done.
func NewWriter(w io.Writer) *Writer {
	return &Writer{
		w: w,
		// ibuf is deliberately left nil: this Writer is unbuffered.
		obuf: make([]byte, obufLen),
	}
}
||||
|
|
||||
|
// NewBufferedWriter returns a new Writer that compresses to w, using the
// framing format described at
// https://github.com/google/snappy/blob/master/framing_format.txt
//
// The Writer returned buffers writes. Users must call Close to guarantee all
// data has been forwarded to the underlying io.Writer. They may also call
// Flush zero or more times before calling Close.
func NewBufferedWriter(w io.Writer) *Writer {
	return &Writer{
		w: w,
		// ibuf holds up to one maxBlockSize block of pending input.
		ibuf: make([]byte, 0, maxBlockSize),
		obuf: make([]byte, obufLen),
	}
}
||||
|
|
||||
|
// Writer is an io.Writer that can write Snappy-compressed bytes.
type Writer struct {
	w io.Writer

	// err is sticky: once set, every subsequent Write, Flush or Close
	// returns it.
	err error

	// ibuf is a buffer for the incoming (uncompressed) bytes.
	//
	// Its use is optional. For backwards compatibility, Writers created by the
	// NewWriter function have ibuf == nil, do not buffer incoming bytes, and
	// therefore do not need to be Flush'ed or Close'd.
	ibuf []byte

	// obuf is a buffer for the outgoing (compressed) bytes.
	obuf []byte

	// wroteStreamHeader is whether we have written the stream header.
	wroteStreamHeader bool
}
||||
|
|
||||
|
// Reset discards the writer's state and switches the Snappy writer to write to
|
||||
|
// w. This permits reusing a Writer rather than allocating a new one.
|
||||
|
func (w *Writer) Reset(writer io.Writer) { |
||||
|
w.w = writer |
||||
|
w.err = nil |
||||
|
if w.ibuf != nil { |
||||
|
w.ibuf = w.ibuf[:0] |
||||
|
} |
||||
|
w.wroteStreamHeader = false |
||||
|
} |
||||
|
|
||||
|
// Write satisfies the io.Writer interface.
func (w *Writer) Write(p []byte) (nRet int, errRet error) {
	if w.ibuf == nil {
		// Do not buffer incoming bytes. This does not perform or compress well
		// if the caller of Writer.Write writes many small slices. This
		// behavior is therefore deprecated, but still supported for backwards
		// compatibility with code that doesn't explicitly Flush or Close.
		return w.write(p)
	}

	// The remainder of this method is based on bufio.Writer.Write from the
	// standard library.

	// While p doesn't fit in the free space of ibuf, either write it straight
	// through (when the buffer is empty) or top up the buffer and flush it.
	for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil {
		var n int
		if len(w.ibuf) == 0 {
			// Large write, empty buffer.
			// Write directly from p to avoid copy.
			// Any error is recorded in w.err and checked after the loop.
			n, _ = w.write(p)
		} else {
			n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
			w.ibuf = w.ibuf[:len(w.ibuf)+n]
			w.Flush()
		}
		nRet += n
		p = p[n:]
	}
	if w.err != nil {
		return nRet, w.err
	}
	// The remainder of p now fits in ibuf; stash it for a later Flush/Close.
	n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
	w.ibuf = w.ibuf[:len(w.ibuf)+n]
	nRet += n
	return nRet, nil
}
||||
|
|
||||
|
// write compresses p one maxBlockSize piece at a time and writes each piece
// to the underlying io.Writer as a framed chunk, prefixed by the stream
// header on the first call. It returns the number of uncompressed bytes
// consumed.
func (w *Writer) write(p []byte) (nRet int, errRet error) {
	if w.err != nil {
		return 0, w.err
	}
	for len(p) > 0 {
		// obuf starts with room for the magic stream header; skip it unless
		// this is the first chunk ever written.
		obufStart := len(magicChunk)
		if !w.wroteStreamHeader {
			w.wroteStreamHeader = true
			copy(w.obuf, magicChunk)
			obufStart = 0
		}

		var uncompressed []byte
		if len(p) > maxBlockSize {
			uncompressed, p = p[:maxBlockSize], p[maxBlockSize:]
		} else {
			uncompressed, p = p, nil
		}
		checksum := crc(uncompressed)

		// Compress the buffer, discarding the result if the improvement
		// isn't at least 12.5%.
		compressed := Encode(w.obuf[obufHeaderLen:], uncompressed)
		chunkType := uint8(chunkTypeCompressedData)
		chunkLen := 4 + len(compressed)
		obufEnd := obufHeaderLen + len(compressed)
		if len(compressed) >= len(uncompressed)-len(uncompressed)/8 {
			chunkType = chunkTypeUncompressedData
			chunkLen = 4 + len(uncompressed)
			obufEnd = obufHeaderLen
		}

		// Fill in the per-chunk header that comes before the body:
		// 1 type byte, 3 little-endian length bytes, 4 checksum bytes.
		w.obuf[len(magicChunk)+0] = chunkType
		w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0)
		w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8)
		w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16)
		w.obuf[len(magicChunk)+4] = uint8(checksum >> 0)
		w.obuf[len(magicChunk)+5] = uint8(checksum >> 8)
		w.obuf[len(magicChunk)+6] = uint8(checksum >> 16)
		w.obuf[len(magicChunk)+7] = uint8(checksum >> 24)

		if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil {
			w.err = err
			return nRet, err
		}
		if chunkType == chunkTypeUncompressedData {
			// The uncompressed body is written directly from the caller's
			// slice rather than copied through obuf.
			if _, err := w.w.Write(uncompressed); err != nil {
				w.err = err
				return nRet, err
			}
		}
		nRet += len(uncompressed)
	}
	return nRet, nil
}
||||
|
|
||||
|
// Flush flushes the Writer to its underlying io.Writer.
|
||||
|
func (w *Writer) Flush() error { |
||||
|
if w.err != nil { |
||||
|
return w.err |
||||
|
} |
||||
|
if len(w.ibuf) == 0 { |
||||
|
return nil |
||||
|
} |
||||
|
w.write(w.ibuf) |
||||
|
w.ibuf = w.ibuf[:0] |
||||
|
return w.err |
||||
|
} |
||||
|
|
||||
|
// Close calls Flush and then closes the Writer.
|
||||
|
func (w *Writer) Close() error { |
||||
|
w.Flush() |
||||
|
ret := w.err |
||||
|
if w.err == nil { |
||||
|
w.err = errClosed |
||||
|
} |
||||
|
return ret |
||||
|
} |
@ -0,0 +1,29 @@ |
|||||
|
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
|
||||
|
// Use of this source code is governed by a BSD-style
|
||||
|
// license that can be found in the LICENSE file.
|
||||
|
|
||||
|
// +build !appengine
// +build gc
// +build !noasm

package snappy

// The functions declared below are implemented in assembly in
// encode_amd64.s. The //go:noescape directives promise the compiler that the
// asm implementations do not let the slice arguments escape to the heap.

// emitLiteral has the same semantics as in encode_other.go.
//
//go:noescape
func emitLiteral(dst, lit []byte) int

// emitCopy has the same semantics as in encode_other.go.
//
//go:noescape
func emitCopy(dst []byte, offset, length int) int

// extendMatch has the same semantics as in encode_other.go.
//
//go:noescape
func extendMatch(src []byte, i, j int) int

// encodeBlock has the same semantics as in encode_other.go.
//
//go:noescape
func encodeBlock(dst, src []byte) (d int)
@ -0,0 +1,730 @@ |
|||||
|
// Copyright 2016 The Go Authors. All rights reserved. |
||||
|
// Use of this source code is governed by a BSD-style |
||||
|
// license that can be found in the LICENSE file. |
||||
|
|
||||
|
// +build !appengine |
||||
|
// +build gc |
||||
|
// +build !noasm |
||||
|
|
||||
|
#include "textflag.h" |
||||
|
|
||||
|
// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a |
||||
|
// Go toolchain regression. See https://github.com/golang/go/issues/15426 and |
||||
|
// https://github.com/golang/snappy/issues/29 |
||||
|
// |
||||
|
// As a workaround, the package was built with a known good assembler, and |
||||
|
// those instructions were disassembled by "objdump -d" to yield the |
||||
|
// 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 |
||||
|
// style comments, in AT&T asm syntax. Note that rsp here is a physical |
||||
|
// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm). |
||||
|
// The instructions were then encoded as "BYTE $0x.." sequences, which assemble |
||||
|
// fine on Go 1.6. |
||||
|
|
||||
|
// The asm code generally follows the pure Go code in encode_other.go, except |
||||
|
// where marked with a "!!!". |
||||
|
|
||||
|
// ---------------------------------------------------------------------------- |
||||
|
|
||||
|
// func emitLiteral(dst, lit []byte) int
//
// All local variables fit into registers. The register allocation:
//	- AX	len(lit)
//	- BX	n
//	- DX	return value
//	- DI	&dst[i]
//	- R10	&lit[0]
//
// The 24 bytes of stack space is to call runtime·memmove.
//
// The unusual register allocation of local variables, such as R10 for the
// source pointer, matches the allocation used at the call site in encodeBlock,
// which makes it easier to manually inline this function.
TEXT ·emitLiteral(SB), NOSPLIT, $24-56
	MOVQ dst_base+0(FP), DI
	MOVQ lit_base+24(FP), R10
	MOVQ lit_len+32(FP), AX
	MOVQ AX, DX
	MOVL AX, BX
	SUBL $1, BX

	// n := len(lit) - 1; pick the 1-, 2- or 3-byte tag encoding.
	CMPL BX, $60
	JLT  oneByte
	CMPL BX, $256
	JLT  twoBytes

threeBytes:
	// 0xf4 == 61<<2 | tagLiteral: tag byte followed by a 2-byte length.
	MOVB $0xf4, 0(DI)
	MOVW BX, 1(DI)
	ADDQ $3, DI
	ADDQ $3, DX
	JMP  memmove

twoBytes:
	// 0xf0 == 60<<2 | tagLiteral: tag byte followed by a 1-byte length.
	MOVB $0xf0, 0(DI)
	MOVB BX, 1(DI)
	ADDQ $2, DI
	ADDQ $2, DX
	JMP  memmove

oneByte:
	// n<<2 | tagLiteral, with tagLiteral == 0: the length fits in the tag.
	SHLB $2, BX
	MOVB BX, 0(DI)
	ADDQ $1, DI
	ADDQ $1, DX

memmove:
	// The return value (tag bytes + len(lit)) is final before the copy.
	MOVQ DX, ret+48(FP)

	// copy(dst[i:], lit)
	//
	// This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
	// DI, R10 and AX as arguments.
	MOVQ DI, 0(SP)
	MOVQ R10, 8(SP)
	MOVQ AX, 16(SP)
	CALL runtime·memmove(SB)
	RET
||||
|
|
||||
|
// ---------------------------------------------------------------------------- |
||||
|
|
||||
|
// func emitCopy(dst []byte, offset, length int) int
//
// All local variables fit into registers. The register allocation:
//	- AX	length
//	- SI	&dst[0]
//	- DI	&dst[i]
//	- R11	offset
//
// The unusual register allocation of local variables, such as R11 for the
// offset, matches the allocation used at the call site in encodeBlock, which
// makes it easier to manually inline this function.
TEXT ·emitCopy(SB), NOSPLIT, $0-48
	MOVQ dst_base+0(FP), DI
	MOVQ DI, SI
	MOVQ offset+24(FP), R11
	MOVQ length+32(FP), AX

loop0:
	// for length >= 68 { etc }
	CMPL AX, $68
	JLT  step1

	// Emit a length 64 copy, encoded as 3 bytes.
	// 0xfe == 63<<2 | tagCopy2.
	MOVB $0xfe, 0(DI)
	MOVW R11, 1(DI)
	ADDQ $3, DI
	SUBL $64, AX
	JMP  loop0

step1:
	// if length > 64 { etc }
	CMPL AX, $64
	JLE  step2

	// Emit a length 60 copy, encoded as 3 bytes.
	// 0xee == 59<<2 | tagCopy2.
	MOVB $0xee, 0(DI)
	MOVW R11, 1(DI)
	ADDQ $3, DI
	SUBL $60, AX

step2:
	// if length >= 12 || offset >= 2048 { goto step3 }
	CMPL AX, $12
	JGE  step3
	CMPL R11, $2048
	JGE  step3

	// Emit the remaining copy, encoded as 2 bytes (tagCopy1):
	// uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1, then uint8(offset).
	MOVB R11, 1(DI)
	SHRL $8, R11
	SHLB $5, R11
	SUBB $4, AX
	SHLB $2, AX
	ORB  AX, R11
	ORB  $1, R11
	MOVB R11, 0(DI)
	ADDQ $2, DI

	// Return the number of bytes written.
	SUBQ SI, DI
	MOVQ DI, ret+40(FP)
	RET

step3:
	// Emit the remaining copy, encoded as 3 bytes (tagCopy2):
	// uint8(length-1)<<2 | tagCopy2, then the 2-byte little-endian offset.
	SUBL $1, AX
	SHLB $2, AX
	ORB  $2, AX
	MOVB AX, 0(DI)
	MOVW R11, 1(DI)
	ADDQ $3, DI

	// Return the number of bytes written.
	SUBQ SI, DI
	MOVQ DI, ret+40(FP)
	RET
||||
|
|
||||
|
// ---------------------------------------------------------------------------- |
||||
|
|
||||
|
// func extendMatch(src []byte, i, j int) int
//
// All local variables fit into registers. The register allocation:
//	- DX	&src[0]
//	- SI	&src[j]
//	- R13	&src[len(src) - 8]
//	- R14	&src[len(src)]
//	- R15	&src[i]
//
// The unusual register allocation of local variables, such as R15 for a source
// pointer, matches the allocation used at the call site in encodeBlock, which
// makes it easier to manually inline this function.
TEXT ·extendMatch(SB), NOSPLIT, $0-48
	MOVQ src_base+0(FP), DX
	MOVQ src_len+8(FP), R14
	MOVQ i+24(FP), R15
	MOVQ j+32(FP), SI
	// Convert the i/j/len indices into absolute pointers.
	ADDQ DX, R14
	ADDQ DX, R15
	ADDQ DX, SI
	MOVQ R14, R13
	SUBQ $8, R13

cmp8:
	// As long as we are 8 or more bytes before the end of src, we can load and
	// compare 8 bytes at a time. If those 8 bytes are equal, repeat.
	CMPQ SI, R13
	JA   cmp1
	MOVQ (R15), AX
	MOVQ (SI), BX
	CMPQ AX, BX
	JNE  bsf
	ADDQ $8, R15
	ADDQ $8, SI
	JMP  cmp8

bsf:
	// If those 8 bytes were not equal, XOR the two 8 byte values, and return
	// the index of the first byte that differs. The BSF instruction finds the
	// least significant 1 bit, the amd64 architecture is little-endian, and
	// the shift by 3 converts a bit index to a byte index.
	XORQ AX, BX
	BSFQ BX, BX
	SHRQ $3, BX
	ADDQ BX, SI

	// Convert from &src[ret] to ret.
	SUBQ DX, SI
	MOVQ SI, ret+40(FP)
	RET

cmp1:
	// In src's tail, compare 1 byte at a time.
	CMPQ SI, R14
	JAE  extendMatchEnd
	MOVB (R15), AX
	MOVB (SI), BX
	CMPB AX, BX
	JNE  extendMatchEnd
	ADDQ $1, R15
	ADDQ $1, SI
	JMP  cmp1

extendMatchEnd:
	// Convert from &src[ret] to ret.
	SUBQ DX, SI
	MOVQ SI, ret+40(FP)
	RET
||||
|
|
||||
|
// ---------------------------------------------------------------------------- |
||||
|
|
||||
|
// func encodeBlock(dst, src []byte) (d int)
//
// All local variables fit into registers, other than "var table". The register
// allocation:
//	- AX	.	.
//	- BX	.	.
//	- CX	56	shift (note that amd64 shifts by non-immediates must use CX).
//	- DX	64	&src[0], tableSize
//	- SI	72	&src[s]
//	- DI	80	&dst[d]
//	- R9	88	sLimit
//	- R10	.	&src[nextEmit]
//	- R11	96	prevHash, currHash, nextHash, offset
//	- R12	104	&src[base], skip
//	- R13	.	&src[nextS], &src[len(src) - 8]
//	- R14	.	len(src), bytesBetweenHashLookups, &src[len(src)], x
//	- R15	112	candidate
//
// The second column (56, 64, etc) is the stack offset to spill the registers
// when calling other functions. We could pack this slightly tighter, but it's
// simpler to have a dedicated spill map independent of the function called.
//
// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An
// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill
// local variables (registers) during calls gives 32768 + 56 + 64 = 32888.
TEXT ·encodeBlock(SB), 0, $32888-56
	MOVQ dst_base+0(FP), DI
	MOVQ src_base+24(FP), SI
	MOVQ src_len+32(FP), R14

	// shift, tableSize := uint32(32-8), 1<<8
	MOVQ $24, CX
	MOVQ $256, DX

calcShift:
	// for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
	//	shift--
	// }
	CMPQ DX, $16384
	JGE  varTable
	CMPQ DX, R14
	JGE  varTable
	SUBQ $1, CX
	SHLQ $1, DX
	JMP  calcShift

varTable:
	// var table [maxTableSize]uint16
	//
	// In the asm code, unlike the Go code, we can zero-initialize only the
	// first tableSize elements. Each uint16 element is 2 bytes and each MOVOU
	// writes 16 bytes, so we can do only tableSize/8 writes instead of the
	// 2048 writes that would zero-initialize all of table's 32768 bytes.
	SHRQ $3, DX
	LEAQ table-32768(SP), BX
	PXOR X0, X0

memclr:
	MOVOU X0, 0(BX)
	ADDQ  $16, BX
	SUBQ  $1, DX
	JNZ   memclr

	// !!! DX = &src[0]
	MOVQ SI, DX

	// sLimit := len(src) - inputMargin
	MOVQ R14, R9
	SUBQ $15, R9

	// !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't
	// change for the rest of the function.
	MOVQ CX, 56(SP)
	MOVQ DX, 64(SP)
	MOVQ R9, 88(SP)

	// nextEmit := 0
	MOVQ DX, R10

	// s := 1
	ADDQ $1, SI

	// nextHash := hash(load32(src, s), shift)
	// 0x1e35a7bd is the hash multiplier, matching the hash func in
	// encode_other.go.
	MOVL  0(SI), R11
	IMULL $0x1e35a7bd, R11
	SHRL  CX, R11

outer:
	// for { etc }

	// skip := 32
	MOVQ $32, R12

	// nextS := s
	MOVQ SI, R13

	// candidate := 0
	MOVQ $0, R15

inner0:
	// for { etc }

	// s := nextS
	MOVQ R13, SI

	// bytesBetweenHashLookups := skip >> 5
	MOVQ R12, R14
	SHRQ $5, R14

	// nextS = s + bytesBetweenHashLookups
	ADDQ R14, R13

	// skip += bytesBetweenHashLookups
	ADDQ R14, R12

	// if nextS > sLimit { goto emitRemainder }
	MOVQ R13, AX
	SUBQ DX, AX
	CMPQ AX, R9
	JA   emitRemainder

	// candidate = int(table[nextHash])
	// XXX: MOVWQZX table-32768(SP)(R11*2), R15
	// XXX: 4e 0f b7 7c 5c 78       movzwq 0x78(%rsp,%r11,2),%r15
	BYTE $0x4e
	BYTE $0x0f
	BYTE $0xb7
	BYTE $0x7c
	BYTE $0x5c
	BYTE $0x78

	// table[nextHash] = uint16(s)
	MOVQ SI, AX
	SUBQ DX, AX

	// XXX: MOVW AX, table-32768(SP)(R11*2)
	// XXX: 66 42 89 44 5c 78       mov    %ax,0x78(%rsp,%r11,2)
	BYTE $0x66
	BYTE $0x42
	BYTE $0x89
	BYTE $0x44
	BYTE $0x5c
	BYTE $0x78

	// nextHash = hash(load32(src, nextS), shift)
	MOVL  0(R13), R11
	IMULL $0x1e35a7bd, R11
	SHRL  CX, R11

	// if load32(src, s) != load32(src, candidate) { continue } break
	MOVL 0(SI), AX
	MOVL (DX)(R15*1), BX
	CMPL AX, BX
	JNE  inner0

fourByteMatch:
	// As per the encode_other.go code:
	//
	// A 4-byte match has been found. We'll later see etc.

	// !!! Jump to a fast path for short (<= 16 byte) literals. See the comment
	// on inputMargin in encode.go.
	MOVQ SI, AX
	SUBQ R10, AX
	CMPQ AX, $16
	JLE  emitLiteralFastPath

	// ----------------------------------------
	// Begin inline of the emitLiteral call.
	//
	// d += emitLiteral(dst[d:], src[nextEmit:s])

	MOVL AX, BX
	SUBL $1, BX

	CMPL BX, $60
	JLT  inlineEmitLiteralOneByte
	CMPL BX, $256
	JLT  inlineEmitLiteralTwoBytes

inlineEmitLiteralThreeBytes:
	MOVB $0xf4, 0(DI)
	MOVW BX, 1(DI)
	ADDQ $3, DI
	JMP  inlineEmitLiteralMemmove

inlineEmitLiteralTwoBytes:
	MOVB $0xf0, 0(DI)
	MOVB BX, 1(DI)
	ADDQ $2, DI
	JMP  inlineEmitLiteralMemmove

inlineEmitLiteralOneByte:
	SHLB $2, BX
	MOVB BX, 0(DI)
	ADDQ $1, DI

inlineEmitLiteralMemmove:
	// Spill local variables (registers) onto the stack; call; unspill.
	//
	// copy(dst[i:], lit)
	//
	// This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
	// DI, R10 and AX as arguments.
	MOVQ DI, 0(SP)
	MOVQ R10, 8(SP)
	MOVQ AX, 16(SP)
	ADDQ AX, DI // Finish the "d +=" part of "d += emitLiteral(etc)".
	MOVQ SI, 72(SP)
	MOVQ DI, 80(SP)
	MOVQ R15, 112(SP)
	CALL runtime·memmove(SB)
	MOVQ 56(SP), CX
	MOVQ 64(SP), DX
	MOVQ 72(SP), SI
	MOVQ 80(SP), DI
	MOVQ 88(SP), R9
	MOVQ 112(SP), R15
	JMP  inner1

inlineEmitLiteralEnd:
	// End inline of the emitLiteral call.
	// ----------------------------------------

emitLiteralFastPath:
	// !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2".
	MOVB AX, BX
	SUBB $1, BX
	SHLB $2, BX
	MOVB BX, (DI)
	ADDQ $1, DI

	// !!! Implement the copy from lit to dst as a 16-byte load and store.
	// (Encode's documentation says that dst and src must not overlap.)
	//
	// This always copies 16 bytes, instead of only len(lit) bytes, but that's
	// OK. Subsequent iterations will fix up the overrun.
	//
	// Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
	// 16-byte loads and stores. This technique probably wouldn't be as
	// effective on architectures that are fussier about alignment.
	MOVOU 0(R10), X0
	MOVOU X0, 0(DI)
	ADDQ  AX, DI

inner1:
	// for { etc }

	// base := s
	MOVQ SI, R12

	// !!! offset := base - candidate
	MOVQ R12, R11
	SUBQ R15, R11
	SUBQ DX, R11

	// ----------------------------------------
	// Begin inline of the extendMatch call.
	//
	// s = extendMatch(src, candidate+4, s+4)

	// !!! R14 = &src[len(src)]
	MOVQ src_len+32(FP), R14
	ADDQ DX, R14

	// !!! R13 = &src[len(src) - 8]
	MOVQ R14, R13
	SUBQ $8, R13

	// !!! R15 = &src[candidate + 4]
	ADDQ $4, R15
	ADDQ DX, R15

	// !!! s += 4
	ADDQ $4, SI

inlineExtendMatchCmp8:
	// As long as we are 8 or more bytes before the end of src, we can load and
	// compare 8 bytes at a time. If those 8 bytes are equal, repeat.
	CMPQ SI, R13
	JA   inlineExtendMatchCmp1
	MOVQ (R15), AX
	MOVQ (SI), BX
	CMPQ AX, BX
	JNE  inlineExtendMatchBSF
	ADDQ $8, R15
	ADDQ $8, SI
	JMP  inlineExtendMatchCmp8

inlineExtendMatchBSF:
	// If those 8 bytes were not equal, XOR the two 8 byte values, and return
	// the index of the first byte that differs. The BSF instruction finds the
	// least significant 1 bit, the amd64 architecture is little-endian, and
	// the shift by 3 converts a bit index to a byte index.
	XORQ AX, BX
	BSFQ BX, BX
	SHRQ $3, BX
	ADDQ BX, SI
	JMP  inlineExtendMatchEnd

inlineExtendMatchCmp1:
	// In src's tail, compare 1 byte at a time.
	CMPQ SI, R14
	JAE  inlineExtendMatchEnd
	MOVB (R15), AX
	MOVB (SI), BX
	CMPB AX, BX
	JNE  inlineExtendMatchEnd
	ADDQ $1, R15
	ADDQ $1, SI
	JMP  inlineExtendMatchCmp1

inlineExtendMatchEnd:
	// End inline of the extendMatch call.
	// ----------------------------------------

	// ----------------------------------------
	// Begin inline of the emitCopy call.
	//
	// d += emitCopy(dst[d:], base-candidate, s-base)

	// !!! length := s - base
	MOVQ SI, AX
	SUBQ R12, AX

inlineEmitCopyLoop0:
	// for length >= 68 { etc }
	CMPL AX, $68
	JLT  inlineEmitCopyStep1

	// Emit a length 64 copy, encoded as 3 bytes.
	MOVB $0xfe, 0(DI)
	MOVW R11, 1(DI)
	ADDQ $3, DI
	SUBL $64, AX
	JMP  inlineEmitCopyLoop0

inlineEmitCopyStep1:
	// if length > 64 { etc }
	CMPL AX, $64
	JLE  inlineEmitCopyStep2

	// Emit a length 60 copy, encoded as 3 bytes.
	MOVB $0xee, 0(DI)
	MOVW R11, 1(DI)
	ADDQ $3, DI
	SUBL $60, AX

inlineEmitCopyStep2:
	// if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 }
	CMPL AX, $12
	JGE  inlineEmitCopyStep3
	CMPL R11, $2048
	JGE  inlineEmitCopyStep3

	// Emit the remaining copy, encoded as 2 bytes.
	MOVB R11, 1(DI)
	SHRL $8, R11
	SHLB $5, R11
	SUBB $4, AX
	SHLB $2, AX
	ORB  AX, R11
	ORB  $1, R11
	MOVB R11, 0(DI)
	ADDQ $2, DI
	JMP  inlineEmitCopyEnd

inlineEmitCopyStep3:
	// Emit the remaining copy, encoded as 3 bytes.
	SUBL $1, AX
	SHLB $2, AX
	ORB  $2, AX
	MOVB AX, 0(DI)
	MOVW R11, 1(DI)
	ADDQ $3, DI

inlineEmitCopyEnd:
	// End inline of the emitCopy call.
	// ----------------------------------------

	// nextEmit = s
	MOVQ SI, R10

	// if s >= sLimit { goto emitRemainder }
	MOVQ SI, AX
	SUBQ DX, AX
	CMPQ AX, R9
	JAE  emitRemainder

	// As per the encode_other.go code:
	//
	// We could immediately etc.

	// x := load64(src, s-1)
	MOVQ -1(SI), R14

	// prevHash := hash(uint32(x>>0), shift)
	MOVL  R14, R11
	IMULL $0x1e35a7bd, R11
	SHRL  CX, R11

	// table[prevHash] = uint16(s-1)
	MOVQ SI, AX
	SUBQ DX, AX
	SUBQ $1, AX

	// XXX: MOVW AX, table-32768(SP)(R11*2)
	// XXX: 66 42 89 44 5c 78       mov    %ax,0x78(%rsp,%r11,2)
	BYTE $0x66
	BYTE $0x42
	BYTE $0x89
	BYTE $0x44
	BYTE $0x5c
	BYTE $0x78

	// currHash := hash(uint32(x>>8), shift)
	SHRQ  $8, R14
	MOVL  R14, R11
	IMULL $0x1e35a7bd, R11
	SHRL  CX, R11

	// candidate = int(table[currHash])
	// XXX: MOVWQZX table-32768(SP)(R11*2), R15
	// XXX: 4e 0f b7 7c 5c 78       movzwq 0x78(%rsp,%r11,2),%r15
	BYTE $0x4e
	BYTE $0x0f
	BYTE $0xb7
	BYTE $0x7c
	BYTE $0x5c
	BYTE $0x78

	// table[currHash] = uint16(s)
	ADDQ $1, AX

	// XXX: MOVW AX, table-32768(SP)(R11*2)
	// XXX: 66 42 89 44 5c 78       mov    %ax,0x78(%rsp,%r11,2)
	BYTE $0x66
	BYTE $0x42
	BYTE $0x89
	BYTE $0x44
	BYTE $0x5c
	BYTE $0x78

	// if uint32(x>>8) == load32(src, candidate) { continue }
	MOVL (DX)(R15*1), BX
	CMPL R14, BX
	JEQ  inner1

	// nextHash = hash(uint32(x>>16), shift)
	SHRQ  $8, R14
	MOVL  R14, R11
	IMULL $0x1e35a7bd, R11
	SHRL  CX, R11

	// s++
	ADDQ $1, SI

	// break out of the inner1 for loop, i.e. continue the outer loop.
	JMP outer

emitRemainder:
	// if nextEmit < len(src) { etc }
	MOVQ src_len+32(FP), AX
	ADDQ DX, AX
	CMPQ R10, AX
	JEQ  encodeBlockEnd

	// d += emitLiteral(dst[d:], src[nextEmit:])
	//
	// Push args.
	MOVQ DI, 0(SP)
	MOVQ $0, 8(SP)   // Unnecessary, as the callee ignores it, but conservative.
	MOVQ $0, 16(SP)  // Unnecessary, as the callee ignores it, but conservative.
	MOVQ R10, 24(SP)
	SUBQ R10, AX
	MOVQ AX, 32(SP)
	MOVQ AX, 40(SP)  // Unnecessary, as the callee ignores it, but conservative.

	// Spill local variables (registers) onto the stack; call; unspill.
	MOVQ DI, 80(SP)
	CALL ·emitLiteral(SB)
	MOVQ 80(SP), DI

	// Finish the "d +=" part of "d += emitLiteral(etc)".
	ADDQ 48(SP), DI

encodeBlockEnd:
	// d = &dst[d] - &dst[0]
	MOVQ dst_base+0(FP), AX
	SUBQ AX, DI
	MOVQ DI, d+48(FP)
	RET
@ -0,0 +1,238 @@ |
|||||
|
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
|
||||
|
// Use of this source code is governed by a BSD-style
|
||||
|
// license that can be found in the LICENSE file.
|
||||
|
|
||||
|
// +build !amd64 appengine !gc noasm
|
||||
|
|
||||
|
package snappy |
||||
|
|
||||
|
// load32 returns the little-endian uint32 stored at b[i:i+4].
func load32(b []byte, i int) uint32 {
	// The three-index slice lets the compiler prove the four reads below
	// are in bounds, eliminating per-access bounds checks.
	v := b[i : i+4 : len(b)]
	return uint32(v[3])<<24 | uint32(v[2])<<16 | uint32(v[1])<<8 | uint32(v[0])
}
||||
|
|
||||
|
// load64 returns the little-endian uint64 stored at b[i:i+8].
func load64(b []byte, i int) uint64 {
	// The three-index slice lets the compiler prove the eight reads below
	// are in bounds, eliminating per-access bounds checks.
	v := b[i : i+8 : len(b)]
	return uint64(v[7])<<56 | uint64(v[6])<<48 | uint64(v[5])<<40 | uint64(v[4])<<32 |
		uint64(v[3])<<24 | uint64(v[2])<<16 | uint64(v[1])<<8 | uint64(v[0])
}
||||
|
|
||||
|
// emitLiteral writes a literal chunk and returns the number of bytes written.
|
||||
|
//
|
||||
|
// It assumes that:
|
||||
|
// dst is long enough to hold the encoded bytes
|
||||
|
// 1 <= len(lit) && len(lit) <= 65536
|
||||
|
func emitLiteral(dst, lit []byte) int { |
||||
|
i, n := 0, uint(len(lit)-1) |
||||
|
switch { |
||||
|
case n < 60: |
||||
|
dst[0] = uint8(n)<<2 | tagLiteral |
||||
|
i = 1 |
||||
|
case n < 1<<8: |
||||
|
dst[0] = 60<<2 | tagLiteral |
||||
|
dst[1] = uint8(n) |
||||
|
i = 2 |
||||
|
default: |
||||
|
dst[0] = 61<<2 | tagLiteral |
||||
|
dst[1] = uint8(n) |
||||
|
dst[2] = uint8(n >> 8) |
||||
|
i = 3 |
||||
|
} |
||||
|
return i + copy(dst[i:], lit) |
||||
|
} |
||||
|
|
||||
|
// emitCopy writes a copy chunk and returns the number of bytes written.
|
||||
|
//
|
||||
|
// It assumes that:
|
||||
|
// dst is long enough to hold the encoded bytes
|
||||
|
// 1 <= offset && offset <= 65535
|
||||
|
// 4 <= length && length <= 65535
|
||||
|
func emitCopy(dst []byte, offset, length int) int { |
||||
|
i := 0 |
||||
|
// The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The
|
||||
|
// threshold for this loop is a little higher (at 68 = 64 + 4), and the
|
||||
|
// length emitted down below is is a little lower (at 60 = 64 - 4), because
|
||||
|
// it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed
|
||||
|
// by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as
|
||||
|
// a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as
|
||||
|
// 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a
|
||||
|
// tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an
|
||||
|
// encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1.
|
||||
|
for length >= 68 { |
||||
|
// Emit a length 64 copy, encoded as 3 bytes.
|
||||
|
dst[i+0] = 63<<2 | tagCopy2 |
||||
|
dst[i+1] = uint8(offset) |
||||
|
dst[i+2] = uint8(offset >> 8) |
||||
|
i += 3 |
||||
|
length -= 64 |
||||
|
} |
||||
|
if length > 64 { |
||||
|
// Emit a length 60 copy, encoded as 3 bytes.
|
||||
|
dst[i+0] = 59<<2 | tagCopy2 |
||||
|
dst[i+1] = uint8(offset) |
||||
|
dst[i+2] = uint8(offset >> 8) |
||||
|
i += 3 |
||||
|
length -= 60 |
||||
|
} |
||||
|
if length >= 12 || offset >= 2048 { |
||||
|
// Emit the remaining copy, encoded as 3 bytes.
|
||||
|
dst[i+0] = uint8(length-1)<<2 | tagCopy2 |
||||
|
dst[i+1] = uint8(offset) |
||||
|
dst[i+2] = uint8(offset >> 8) |
||||
|
return i + 3 |
||||
|
} |
||||
|
// Emit the remaining copy, encoded as 2 bytes.
|
||||
|
dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 |
||||
|
dst[i+1] = uint8(offset) |
||||
|
return i + 2 |
||||
|
} |
||||
|
|
||||
|
// extendMatch returns the largest k such that k <= len(src) and that
// src[i:i+k-j] and src[j:k] have the same contents.
//
// It assumes that:
//	0 <= i && i < j && j <= len(src)
func extendMatch(src []byte, i, j int) int {
	// Walk both cursors forward while the bytes keep matching.
	for j < len(src) && src[i] == src[j] {
		i++
		j++
	}
	return j
}
||||
|
|
||||
|
// hash maps a 4-byte value u to a table index, keeping the top bits of a
// multiplicative hash; shift selects how many bits of index survive.
func hash(u, shift uint32) uint32 {
	const multiplier = 0x1e35a7bd // same constant as the C++ snappy hash
	u *= multiplier
	return u >> shift
}
||||
|
|
||||
|
// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
// assumes that the varint-encoded length of the decompressed bytes has already
// been written.
//
// It also assumes that:
//	len(dst) >= MaxEncodedLen(len(src)) &&
//	minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
func encodeBlock(dst, src []byte) (d int) {
	// Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
	// The table element type is uint16, as s < sLimit and sLimit < len(src)
	// and len(src) <= maxBlockSize and maxBlockSize == 65536.
	const (
		maxTableSize = 1 << 14
		// tableMask is redundant, but helps the compiler eliminate bounds
		// checks.
		tableMask = maxTableSize - 1
	)
	// Shrink the hash output (raise shift) only as far as the input needs:
	// small inputs get a small effective table.
	shift := uint32(32 - 8)
	for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
		shift--
	}
	// In Go, all array elements are zero-initialized, so there is no advantage
	// to a smaller tableSize per se. However, it matches the C++ algorithm,
	// and in the asm versions of this code, we can get away with zeroing only
	// the first tableSize elements.
	var table [maxTableSize]uint16

	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	sLimit := len(src) - inputMargin

	// nextEmit is where in src the next emitLiteral should start from.
	nextEmit := 0

	// The encoded form must start with a literal, as there are no previous
	// bytes to copy, so we start looking for hash matches at s == 1.
	s := 1
	nextHash := hash(load32(src, s), shift)

	for {
		// Copied from the C++ snappy implementation:
		//
		// Heuristic match skipping: If 32 bytes are scanned with no matches
		// found, start looking only at every other byte. If 32 more bytes are
		// scanned (or skipped), look at every third byte, etc.. When a match
		// is found, immediately go back to looking at every byte. This is a
		// small loss (~5% performance, ~0.1% density) for compressible data
		// due to more bookkeeping, but for non-compressible data (such as
		// JPEG) it's a huge win since the compressor quickly "realizes" the
		// data is incompressible and doesn't bother looking for matches
		// everywhere.
		//
		// The "skip" variable keeps track of how many bytes there are since
		// the last match; dividing it by 32 (ie. right-shifting by five) gives
		// the number of bytes to move ahead for each iteration.
		skip := 32

		nextS := s
		candidate := 0
		for {
			s = nextS
			bytesBetweenHashLookups := skip >> 5
			nextS = s + bytesBetweenHashLookups
			skip += bytesBetweenHashLookups
			if nextS > sLimit {
				goto emitRemainder
			}
			candidate = int(table[nextHash&tableMask])
			table[nextHash&tableMask] = uint16(s)
			nextHash = hash(load32(src, nextS), shift)
			if load32(src, s) == load32(src, candidate) {
				break
			}
		}

		// A 4-byte match has been found. We'll later see if more than 4 bytes
		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
		// them as literal bytes.
		d += emitLiteral(dst[d:], src[nextEmit:s])

		// Call emitCopy, and then see if another emitCopy could be our next
		// move. Repeat until we find no match for the input immediately after
		// what was consumed by the last emitCopy call.
		//
		// If we exit this loop normally then we need to call emitLiteral next,
		// though we don't yet know how big the literal will be. We handle that
		// by proceeding to the next iteration of the main loop. We also can
		// exit this loop via goto if we get close to exhausting the input.
		for {
			// Invariant: we have a 4-byte match at s, and no need to emit any
			// literal bytes prior to s.
			base := s

			// Extend the 4-byte match as long as possible.
			//
			// This is an inlined version of:
			//	s = extendMatch(src, candidate+4, s+4)
			s += 4
			for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 {
			}

			d += emitCopy(dst[d:], base-candidate, s-base)
			nextEmit = s
			if s >= sLimit {
				goto emitRemainder
			}

			// We could immediately start working at s now, but to improve
			// compression we first update the hash table at s-1 and at s. If
			// another emitCopy is not our next move, also calculate nextHash
			// at s+1. At least on GOARCH=amd64, these three hash calculations
			// are faster as one load64 call (with some shifts) instead of
			// three load32 calls.
			x := load64(src, s-1)
			prevHash := hash(uint32(x>>0), shift)
			table[prevHash&tableMask] = uint16(s - 1)
			currHash := hash(uint32(x>>8), shift)
			candidate = int(table[currHash&tableMask])
			table[currHash&tableMask] = uint16(s)
			if uint32(x>>8) != load32(src, candidate) {
				nextHash = hash(uint32(x>>16), shift)
				s++
				break
			}
		}
	}

emitRemainder:
	// Any unmatched tail of src is emitted as one final literal chunk.
	if nextEmit < len(src) {
		d += emitLiteral(dst[d:], src[nextEmit:])
	}
	return d
}
1965
vendor/src/github.com/golang/snappy/golden_test.go
File diff suppressed because it is too large
View File
File diff suppressed because it is too large
View File
@ -0,0 +1,87 @@ |
|||||
|
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
|
||||
|
// Use of this source code is governed by a BSD-style
|
||||
|
// license that can be found in the LICENSE file.
|
||||
|
|
||||
|
// Package snappy implements the snappy block-based compression format.
|
||||
|
// It aims for very high speeds and reasonable compression.
|
||||
|
//
|
||||
|
// The C++ snappy implementation is at https://github.com/google/snappy
|
||||
|
package snappy // import "github.com/golang/snappy"
|
||||
|
|
||||
|
import ( |
||||
|
"hash/crc32" |
||||
|
) |
||||
|
|
||||
|
/* |
||||
|
Each encoded block begins with the varint-encoded length of the decoded data, |
||||
|
followed by a sequence of chunks. Chunks begin and end on byte boundaries. The |
||||
|
first byte of each chunk is broken into its 2 least and 6 most significant bits |
||||
|
called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. |
||||
|
Zero means a literal tag. All other values mean a copy tag. |
||||
|
|
||||
|
For literal tags: |
||||
|
- If m < 60, the next 1 + m bytes are literal bytes. |
||||
|
- Otherwise, let n be the little-endian unsigned integer denoted by the next |
||||
|
m - 59 bytes. The next 1 + n bytes after that are literal bytes. |
||||
|
|
||||
|
For copy tags, length bytes are copied from offset bytes ago, in the style of |
||||
|
Lempel-Ziv compression algorithms. In particular: |
||||
|
- For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). |
||||
|
The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 |
||||
|
of the offset. The next byte is bits 0-7 of the offset. |
||||
|
- For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). |
||||
|
The length is 1 + m. The offset is the little-endian unsigned integer |
||||
|
denoted by the next 2 bytes. |
||||
|
- For l == 3, this tag is a legacy format that is no longer issued by most |
||||
|
encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in |
||||
|
[1, 65). The length is 1 + m. The offset is the little-endian unsigned |
||||
|
integer denoted by the next 4 bytes. |
||||
|
*/ |
||||
|
// Chunk tag values. The 2 least-significant bits of each chunk's first byte
// select the chunk type (see the format description above): zero means a
// literal tag, and the other three values are copy tags whose offsets are
// encoded with increasing width.
const (
	tagLiteral = 0x00
	tagCopy1   = 0x01
	tagCopy2   = 0x02
	tagCopy4   = 0x03
)
||||
|
|
||||
|
const (
	// Sizes, in bytes, of the per-chunk checksum and chunk header used by
	// the framing format (Writer/Reader), and the magic stream-identifier
	// chunk that begins every framed stream.
	checksumSize    = 4
	chunkHeaderSize = 4
	magicChunk      = "\xff\x06\x00\x00" + magicBody
	magicBody       = "sNaPpY"

	// maxBlockSize is the maximum size of the input to encodeBlock. It is not
	// part of the wire format per se, but some parts of the encoder assume
	// that an offset fits into a uint16.
	//
	// Also, for the framing format (Writer type instead of Encode function),
	// https://github.com/google/snappy/blob/master/framing_format.txt says
	// that "the uncompressed data in a chunk must be no longer than 65536
	// bytes".
	maxBlockSize = 65536

	// maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is
	// hard coded to be a const instead of a variable, so that obufLen can also
	// be a const. Their equivalence is confirmed by
	// TestMaxEncodedLenOfMaxBlockSize.
	maxEncodedLenOfMaxBlockSize = 76490

	// obufLen is the size of the output buffer: room for the stream header
	// plus one maximally-encoded block.
	obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize
	obufLen       = obufHeaderLen + maxEncodedLenOfMaxBlockSize
)
||||
|
|
||||
|
// Chunk types defined by the framing format; see
// https://github.com/google/snappy/blob/master/framing_format.txt
const (
	chunkTypeCompressedData   = 0x00
	chunkTypeUncompressedData = 0x01
	chunkTypePadding          = 0xfe
	chunkTypeStreamIdentifier = 0xff
)
||||
|
|
||||
|
// crcTable is the CRC-32 table for the Castagnoli polynomial, as required by
// the snappy framing format's checksum.
var crcTable = crc32.MakeTable(crc32.Castagnoli)

// crc implements the masked checksum specified in section 3 of
// https://github.com/google/snappy/blob/master/framing_format.txt
func crc(b []byte) uint32 {
	// Compute the plain CRC-32C, then apply the format's masking step:
	// rotate right by 15 bits and add a fixed constant.
	sum := crc32.Update(0, crcTable, b)
	rotated := sum>>15 | sum<<17
	return rotated + 0xa282ead8
}
1353
vendor/src/github.com/golang/snappy/snappy_test.go
File diff suppressed because it is too large
View File
File diff suppressed because it is too large
View File
@ -0,0 +1,396 @@ |
|||||
|
Produced by David Widger. The previous edition was updated by Jose |
||||
|
Menendez. |
||||
|
|
||||
|
|
||||
|
|
||||
|
|
||||
|
|
||||
|
THE ADVENTURES OF TOM SAWYER |
||||
|
BY |
||||
|
MARK TWAIN |
||||
|
(Samuel Langhorne Clemens) |
||||
|
|
||||
|
|
||||
|
|
||||
|
|
||||
|
P R E F A C E |
||||
|
|
||||
|
MOST of the adventures recorded in this book really occurred; one or |
||||
|
two were experiences of my own, the rest those of boys who were |
||||
|
schoolmates of mine. Huck Finn is drawn from life; Tom Sawyer also, but |
||||
|
not from an individual--he is a combination of the characteristics of |
||||
|
three boys whom I knew, and therefore belongs to the composite order of |
||||
|
architecture. |
||||
|
|
||||
|
The odd superstitions touched upon were all prevalent among children |
||||
|
and slaves in the West at the period of this story--that is to say, |
||||
|
thirty or forty years ago. |
||||
|
|
||||
|
Although my book is intended mainly for the entertainment of boys and |
||||
|
girls, I hope it will not be shunned by men and women on that account, |
||||
|
for part of my plan has been to try to pleasantly remind adults of what |
||||
|
they once were themselves, and of how they felt and thought and talked, |
||||
|
and what queer enterprises they sometimes engaged in. |
||||
|
|
||||
|
THE AUTHOR. |
||||
|
|
||||
|
HARTFORD, 1876. |
||||
|
|
||||
|
|
||||
|
|
||||
|
T O M S A W Y E R |
||||
|
|
||||
|
|
||||
|
|
||||
|
CHAPTER I |
||||
|
|
||||
|
"TOM!" |
||||
|
|
||||
|
No answer. |
||||
|
|
||||
|
"TOM!" |
||||
|
|
||||
|
No answer. |
||||
|
|
||||
|
"What's gone with that boy, I wonder? You TOM!" |
||||
|
|
||||
|
No answer. |
||||
|
|
||||
|
The old lady pulled her spectacles down and looked over them about the |
||||
|
room; then she put them up and looked out under them. She seldom or |
||||
|
never looked THROUGH them for so small a thing as a boy; they were her |
||||
|
state pair, the pride of her heart, and were built for "style," not |
||||
|
service--she could have seen through a pair of stove-lids just as well. |
||||
|
She looked perplexed for a moment, and then said, not fiercely, but |
||||
|
still loud enough for the furniture to hear: |
||||
|
|
||||
|
"Well, I lay if I get hold of you I'll--" |
||||
|
|
||||
|
She did not finish, for by this time she was bending down and punching |
||||
|
under the bed with the broom, and so she needed breath to punctuate the |
||||
|
punches with. She resurrected nothing but the cat. |
||||
|
|
||||
|
"I never did see the beat of that boy!" |
||||
|
|
||||
|
She went to the open door and stood in it and looked out among the |
||||
|
tomato vines and "jimpson" weeds that constituted the garden. No Tom. |
||||
|
So she lifted up her voice at an angle calculated for distance and |
||||
|
shouted: |
||||
|
|
||||
|
"Y-o-u-u TOM!" |
||||
|
|
||||
|
There was a slight noise behind her and she turned just in time to |
||||
|
seize a small boy by the slack of his roundabout and arrest his flight. |
||||
|
|
||||
|
"There! I might 'a' thought of that closet. What you been doing in |
||||
|
there?" |
||||
|
|
||||
|
"Nothing." |
||||
|
|
||||
|
"Nothing! Look at your hands. And look at your mouth. What IS that |
||||
|
truck?" |
||||
|
|
||||
|
"I don't know, aunt." |
||||
|
|
||||
|
"Well, I know. It's jam--that's what it is. Forty times I've said if |
||||
|
you didn't let that jam alone I'd skin you. Hand me that switch." |
||||
|
|
||||
|
The switch hovered in the air--the peril was desperate-- |
||||
|
|
||||
|
"My! Look behind you, aunt!" |
||||
|
|
||||
|
The old lady whirled round, and snatched her skirts out of danger. The |
||||
|
lad fled on the instant, scrambled up the high board-fence, and |
||||
|
disappeared over it. |
||||
|
|
||||
|
His aunt Polly stood surprised a moment, and then broke into a gentle |
||||
|
laugh. |
||||
|
|
||||
|
"Hang the boy, can't I never learn anything? Ain't he played me tricks |
||||
|
enough like that for me to be looking out for him by this time? But old |
||||
|
fools is the biggest fools there is. Can't learn an old dog new tricks, |
||||
|
as the saying is. But my goodness, he never plays them alike, two days, |
||||
|
and how is a body to know what's coming? He 'pears to know just how |
||||
|
long he can torment me before I get my dander up, and he knows if he |
||||
|
can make out to put me off for a minute or make me laugh, it's all down |
||||
|
again and I can't hit him a lick. I ain't doing my duty by that boy, |
||||
|
and that's the Lord's truth, goodness knows. Spare the rod and spile |
||||
|
the child, as the Good Book says. I'm a laying up sin and suffering for |
||||
|
us both, I know. He's full of the Old Scratch, but laws-a-me! he's my |
||||
|
own dead sister's boy, poor thing, and I ain't got the heart to lash |
||||
|
him, somehow. Every time I let him off, my conscience does hurt me so, |
||||
|
and every time I hit him my old heart most breaks. Well-a-well, man |
||||
|
that is born of woman is of few days and full of trouble, as the |
||||
|
Scripture says, and I reckon it's so. He'll play hookey this evening, * |
||||
|
and [* Southwestern for "afternoon"] I'll just be obleeged to make him |
||||
|
work, to-morrow, to punish him. It's mighty hard to make him work |
||||
|
Saturdays, when all the boys is having holiday, but he hates work more |
||||
|
than he hates anything else, and I've GOT to do some of my duty by him, |
||||
|
or I'll be the ruination of the child." |
||||
|
|
||||
|
Tom did play hookey, and he had a very good time. He got back home |
||||
|
barely in season to help Jim, the small colored boy, saw next-day's |
||||
|
wood and split the kindlings before supper--at least he was there in |
||||
|
time to tell his adventures to Jim while Jim did three-fourths of the |
||||
|
work. Tom's younger brother (or rather half-brother) Sid was already |
||||
|
through with his part of the work (picking up chips), for he was a |
||||
|
quiet boy, and had no adventurous, troublesome ways. |
||||
|
|
||||
|
While Tom was eating his supper, and stealing sugar as opportunity |
||||
|
offered, Aunt Polly asked him questions that were full of guile, and |
||||
|
very deep--for she wanted to trap him into damaging revealments. Like |
||||
|
many other simple-hearted souls, it was her pet vanity to believe she |
||||
|
was endowed with a talent for dark and mysterious diplomacy, and she |
||||
|
loved to contemplate her most transparent devices as marvels of low |
||||
|
cunning. Said she: |
||||
|
|
||||
|
"Tom, it was middling warm in school, warn't it?" |
||||
|
|
||||
|
"Yes'm." |
||||
|
|
||||
|
"Powerful warm, warn't it?" |
||||
|
|
||||
|
"Yes'm." |
||||
|
|
||||
|
"Didn't you want to go in a-swimming, Tom?" |
||||
|
|
||||
|
A bit of a scare shot through Tom--a touch of uncomfortable suspicion. |
||||
|
He searched Aunt Polly's face, but it told him nothing. So he said: |
||||
|
|
||||
|
"No'm--well, not very much." |
||||
|
|
||||
|
The old lady reached out her hand and felt Tom's shirt, and said: |
||||
|
|
||||
|
"But you ain't too warm now, though." And it flattered her to reflect |
||||
|
that she had discovered that the shirt was dry without anybody knowing |
||||
|
that that was what she had in her mind. But in spite of her, Tom knew |
||||
|
where the wind lay, now. So he forestalled what might be the next move: |
||||
|
|
||||
|
"Some of us pumped on our heads--mine's damp yet. See?" |
||||
|
|
||||
|
Aunt Polly was vexed to think she had overlooked that bit of |
||||
|
circumstantial evidence, and missed a trick. Then she had a new |
||||
|
inspiration: |
||||
|
|
||||
|
"Tom, you didn't have to undo your shirt collar where I sewed it, to |
||||
|
pump on your head, did you? Unbutton your jacket!" |
||||
|
|
||||
|
The trouble vanished out of Tom's face. He opened his jacket. His |
||||
|
shirt collar was securely sewed. |
||||
|
|
||||
|
"Bother! Well, go 'long with you. I'd made sure you'd played hookey |
||||
|
and been a-swimming. But I forgive ye, Tom. I reckon you're a kind of a |
||||
|
singed cat, as the saying is--better'n you look. THIS time." |
||||
|
|
||||
|
She was half sorry her sagacity had miscarried, and half glad that Tom |
||||
|
had stumbled into obedient conduct for once. |
||||
|
|
||||
|
But Sidney said: |
||||
|
|
||||
|
"Well, now, if I didn't think you sewed his collar with white thread, |
||||
|
but it's black." |
||||
|
|
||||
|
"Why, I did sew it with white! Tom!" |
||||
|
|
||||
|
But Tom did not wait for the rest. As he went out at the door he said: |
||||
|
|
||||
|
"Siddy, I'll lick you for that." |
||||
|
|
||||
|
In a safe place Tom examined two large needles which were thrust into |
||||
|
the lapels of his jacket, and had thread bound about them--one needle |
||||
|
carried white thread and the other black. He said: |
||||
|
|
||||
|
"She'd never noticed if it hadn't been for Sid. Confound it! sometimes |
||||
|
she sews it with white, and sometimes she sews it with black. I wish to |
||||
|
geeminy she'd stick to one or t'other--I can't keep the run of 'em. But |
||||
|
I bet you I'll lam Sid for that. I'll learn him!" |
||||
|
|
||||
|
He was not the Model Boy of the village. He knew the model boy very |
||||
|
well though--and loathed him. |
||||
|
|
||||
|
Within two minutes, or even less, he had forgotten all his troubles. |
||||
|
Not because his troubles were one whit less heavy and bitter to him |
||||
|
than a man's are to a man, but because a new and powerful interest bore |
||||
|
them down and drove them out of his mind for the time--just as men's |
||||
|
misfortunes are forgotten in the excitement of new enterprises. This |
||||
|
new interest was a valued novelty in whistling, which he had just |
||||
|
acquired from a negro, and he was suffering to practise it undisturbed. |
||||
|
It consisted in a peculiar bird-like turn, a sort of liquid warble, |
||||
|
produced by touching the tongue to the roof of the mouth at short |
||||
|
intervals in the midst of the music--the reader probably remembers how |
||||
|
to do it, if he has ever been a boy. Diligence and attention soon gave |
||||
|
him the knack of it, and he strode down the street with his mouth full |
||||
|
of harmony and his soul full of gratitude. He felt much as an |
||||
|
astronomer feels who has discovered a new planet--no doubt, as far as |
||||
|
strong, deep, unalloyed pleasure is concerned, the advantage was with |
||||
|
the boy, not the astronomer. |
||||
|
|
||||
|
The summer evenings were long. It was not dark, yet. Presently Tom |
||||
|
checked his whistle. A stranger was before him--a boy a shade larger |
||||
|
than himself. A new-comer of any age or either sex was an impressive |
||||
|
curiosity in the poor little shabby village of St. Petersburg. This boy |
||||
|
was well dressed, too--well dressed on a week-day. This was simply |
||||
|
astounding. His cap was a dainty thing, his close-buttoned blue cloth |
||||
|
roundabout was new and natty, and so were his pantaloons. He had shoes |
||||
|
on--and it was only Friday. He even wore a necktie, a bright bit of |
||||
|
ribbon. He had a citified air about him that ate into Tom's vitals. The |
||||
|
more Tom stared at the splendid marvel, the higher he turned up his |
||||
|
nose at his finery and the shabbier and shabbier his own outfit seemed |
||||
|
to him to grow. Neither boy spoke. If one moved, the other moved--but |
||||
|
only sidewise, in a circle; they kept face to face and eye to eye all |
||||
|
the time. Finally Tom said: |
||||
|
|
||||
|
"I can lick you!" |
||||
|
|
||||
|
"I'd like to see you try it." |
||||
|
|
||||
|
"Well, I can do it." |
||||
|
|
||||
|
"No you can't, either." |
||||
|
|
||||
|
"Yes I can." |
||||
|
|
||||
|
"No you can't." |
||||
|
|
||||
|
"I can." |
||||
|
|
||||
|
"You can't." |
||||
|
|
||||
|
"Can!" |
||||
|
|
||||
|
"Can't!" |
||||
|
|
||||
|
An uncomfortable pause. Then Tom said: |
||||
|
|
||||
|
"What's your name?" |
||||
|
|
||||
|
"'Tisn't any of your business, maybe." |
||||
|
|
||||
|
"Well I 'low I'll MAKE it my business." |
||||
|
|
||||
|
"Well why don't you?" |
||||
|
|
||||
|
"If you say much, I will." |
||||
|
|
||||
|
"Much--much--MUCH. There now." |
||||
|
|
||||
|
"Oh, you think you're mighty smart, DON'T you? I could lick you with |
||||
|
one hand tied behind me, if I wanted to." |
||||
|
|
||||
|
"Well why don't you DO it? You SAY you can do it." |
||||
|
|
||||
|
"Well I WILL, if you fool with me." |
||||
|
|
||||
|
"Oh yes--I've seen whole families in the same fix." |
||||
|
|
||||
|
"Smarty! You think you're SOME, now, DON'T you? Oh, what a hat!" |
||||
|
|
||||
|
"You can lump that hat if you don't like it. I dare you to knock it |
||||
|
off--and anybody that'll take a dare will suck eggs." |
||||
|
|
||||
|
"You're a liar!" |
||||
|
|
||||
|
"You're another." |
||||
|
|
||||
|
"You're a fighting liar and dasn't take it up." |
||||
|
|
||||
|
"Aw--take a walk!" |
||||
|
|
||||
|
"Say--if you give me much more of your sass I'll take and bounce a |
||||
|
rock off'n your head." |
||||
|
|
||||
|
"Oh, of COURSE you will." |
||||
|
|
||||
|
"Well I WILL." |
||||
|
|
||||
|
"Well why don't you DO it then? What do you keep SAYING you will for? |
||||
|
Why don't you DO it? It's because you're afraid." |
||||
|
|
||||
|
"I AIN'T afraid." |
||||
|
|
||||
|
"You are." |
||||
|
|
||||
|
"I ain't." |
||||
|
|
||||
|
"You are." |
||||
|
|
||||
|
Another pause, and more eying and sidling around each other. Presently |
||||
|
they were shoulder to shoulder. Tom said: |
||||
|
|
||||
|
"Get away from here!" |
||||
|
|
||||
|
"Go away yourself!" |
||||
|
|
||||
|
"I won't." |
||||
|
|
||||
|
"I won't either." |
||||
|
|
||||
|
So they stood, each with a foot placed at an angle as a brace, and |
||||
|
both shoving with might and main, and glowering at each other with |
||||
|
hate. But neither could get an advantage. After struggling till both |
||||
|
were hot and flushed, each relaxed his strain with watchful caution, |
||||
|
and Tom said: |
||||
|
|
||||
|
"You're a coward and a pup. I'll tell my big brother on you, and he |
||||
|
can thrash you with his little finger, and I'll make him do it, too." |
||||
|
|
||||
|
"What do I care for your big brother? I've got a brother that's bigger |
||||
|
than he is--and what's more, he can throw him over that fence, too." |
||||
|
[Both brothers were imaginary.] |
||||
|
|
||||
|
"That's a lie." |
||||
|
|
||||
|
"YOUR saying so don't make it so." |
||||
|
|
||||
|
Tom drew a line in the dust with his big toe, and said: |
||||
|
|
||||
|
"I dare you to step over that, and I'll lick you till you can't stand |
||||
|
up. Anybody that'll take a dare will steal sheep." |
||||
|
|
||||
|
The new boy stepped over promptly, and said: |
||||
|
|
||||
|
"Now you said you'd do it, now let's see you do it." |
||||
|
|
||||
|
"Don't you crowd me now; you better look out." |
||||
|
|
||||
|
"Well, you SAID you'd do it--why don't you do it?" |
||||
|
|
||||
|
"By jingo! for two cents I WILL do it." |
||||
|
|
||||
|
The new boy took two broad coppers out of his pocket and held them out |
||||
|
with derision. Tom struck them to the ground. In an instant both boys |
||||
|
were rolling and tumbling in the dirt, gripped together like cats; and |
||||
|
for the space of a minute they tugged and tore at each other's hair and |
||||
|
clothes, punched and scratched each other's nose, and covered |
||||
|
themselves with dust and glory. Presently the confusion took form, and |
||||
|
through the fog of battle Tom appeared, seated astride the new boy, and |
||||
|
pounding him with his fists. "Holler 'nuff!" said he. |
||||
|
|
||||
|
The boy only struggled to free himself. He was crying--mainly from rage. |
||||
|
|
||||
|
"Holler 'nuff!"--and the pounding went on. |
||||
|
|
||||
|
At last the stranger got out a smothered "'Nuff!" and Tom let him up |
||||
|
and said: |
||||
|
|
||||
|
"Now that'll learn you. Better look out who you're fooling with next |
||||
|
time." |
||||
|
|
||||
|
The new boy went off brushing the dust from his clothes, sobbing, |
||||
|
snuffling, and occasionally looking back and shaking his head and |
||||
|
threatening what he would do to Tom the "next time he caught him out." |
||||
|
To which Tom responded with jeers, and started off in high feather, and |
||||
|
as soon as his back was turned the new boy snatched up a stone, threw |
||||
|
it and hit him between the shoulders and then turned tail and ran like |
||||
|
an antelope. Tom chased the traitor home, and thus found out where he |
||||
|
lived. He then held a position at the gate for some time, daring the |
||||
|
enemy to come outside, but the enemy only made faces at him through the |
||||
|
window and declined. At last the enemy's mother appeared, and called |
||||
|
Tom a bad, vicious, vulgar child, and ordered him away. So he went |
||||
|
away; but he said he "'lowed" to "lay" for that boy. |
||||
|
|
||||
|
He got home pretty late that night, and when he climbed cautiously in |
||||
|
at the window, he uncovered an ambuscade, in the person of his aunt; |
||||
|
and when she saw the state his clothes were in her resolution to turn |
||||
|
his Saturday holiday into captivity at hard labor became adamantine in |
||||
|
its firmness. |
@ -0,0 +1,202 @@ |
|||||
|
|
||||
|
Apache License |
||||
|
Version 2.0, January 2004 |
||||
|
http://www.apache.org/licenses/ |
||||
|
|
||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION |
||||
|
|
||||
|
1. Definitions. |
||||
|
|
||||
|
"License" shall mean the terms and conditions for use, reproduction, |
||||
|
and distribution as defined by Sections 1 through 9 of this document. |
||||
|
|
||||
|
"Licensor" shall mean the copyright owner or entity authorized by |
||||
|
the copyright owner that is granting the License. |
||||
|
|
||||
|
"Legal Entity" shall mean the union of the acting entity and all |
||||
|
other entities that control, are controlled by, or are under common |
||||
|
control with that entity. For the purposes of this definition, |
||||
|
"control" means (i) the power, direct or indirect, to cause the |
||||
|
direction or management of such entity, whether by contract or |
||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the |
||||
|
outstanding shares, or (iii) beneficial ownership of such entity. |
||||
|
|
||||
|
"You" (or "Your") shall mean an individual or Legal Entity |
||||
|
exercising permissions granted by this License. |
||||
|
|
||||
|
"Source" form shall mean the preferred form for making modifications, |
||||
|
including but not limited to software source code, documentation |
||||
|
source, and configuration files. |
||||
|
|
||||
|
"Object" form shall mean any form resulting from mechanical |
||||
|
transformation or translation of a Source form, including but |
||||
|
not limited to compiled object code, generated documentation, |
||||
|
and conversions to other media types. |
||||
|
|
||||
|
"Work" shall mean the work of authorship, whether in Source or |
||||
|
Object form, made available under the License, as indicated by a |
||||
|
copyright notice that is included in or attached to the work |
||||
|
(an example is provided in the Appendix below). |
||||
|
|
||||
|
"Derivative Works" shall mean any work, whether in Source or Object |
||||
|
form, that is based on (or derived from) the Work and for which the |
||||
|
editorial revisions, annotations, elaborations, or other modifications |
||||
|
represent, as a whole, an original work of authorship. For the purposes |
||||
|
of this License, Derivative Works shall not include works that remain |
||||
|
separable from, or merely link (or bind by name) to the interfaces of, |
||||
|
the Work and Derivative Works thereof. |
||||
|
|
||||
|
"Contribution" shall mean any work of authorship, including |
||||
|
the original version of the Work and any modifications or additions |
||||
|
to that Work or Derivative Works thereof, that is intentionally |
||||
|
submitted to Licensor for inclusion in the Work by the copyright owner |
||||
|
or by an individual or Legal Entity authorized to submit on behalf of |
||||
|
the copyright owner. For the purposes of this definition, "submitted" |
||||
|
means any form of electronic, verbal, or written communication sent |
||||
|
to the Licensor or its representatives, including but not limited to |
||||
|
communication on electronic mailing lists, source code control systems, |
||||
|
and issue tracking systems that are managed by, or on behalf of, the |
||||
|
Licensor for the purpose of discussing and improving the Work, but |
||||
|
excluding communication that is conspicuously marked or otherwise |
||||
|
designated in writing by the copyright owner as "Not a Contribution." |
||||
|
|
||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity |
||||
|
on behalf of whom a Contribution has been received by Licensor and |
||||
|
subsequently incorporated within the Work. |
||||
|
|
||||
|
2. Grant of Copyright License. Subject to the terms and conditions of |
||||
|
this License, each Contributor hereby grants to You a perpetual, |
||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable |
||||
|
copyright license to reproduce, prepare Derivative Works of, |
||||
|
publicly display, publicly perform, sublicense, and distribute the |
||||
|
Work and such Derivative Works in Source or Object form. |
||||
|
|
||||
|
3. Grant of Patent License. Subject to the terms and conditions of |
||||
|
this License, each Contributor hereby grants to You a perpetual, |
||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable |
||||
|
(except as stated in this section) patent license to make, have made, |
||||
|
use, offer to sell, sell, import, and otherwise transfer the Work, |
||||
|
where such license applies only to those patent claims licensable |
||||
|
by such Contributor that are necessarily infringed by their |
||||
|
Contribution(s) alone or by combination of their Contribution(s) |
||||
|
with the Work to which such Contribution(s) was submitted. If You |
||||
|
institute patent litigation against any entity (including a |
||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work |
||||
|
or a Contribution incorporated within the Work constitutes direct |
||||
|
or contributory patent infringement, then any patent licenses |
||||
|
granted to You under this License for that Work shall terminate |
||||
|
as of the date such litigation is filed. |
||||
|
|
||||
|
4. Redistribution. You may reproduce and distribute copies of the |
||||
|
Work or Derivative Works thereof in any medium, with or without |
||||
|
modifications, and in Source or Object form, provided that You |
||||
|
meet the following conditions: |
||||
|
|
||||
|
(a) You must give any other recipients of the Work or |
||||
|
Derivative Works a copy of this License; and |
||||
|
|
||||
|
(b) You must cause any modified files to carry prominent notices |
||||
|
stating that You changed the files; and |
||||
|
|
||||
|
(c) You must retain, in the Source form of any Derivative Works |
||||
|
that You distribute, all copyright, patent, trademark, and |
||||
|
attribution notices from the Source form of the Work, |
||||
|
excluding those notices that do not pertain to any part of |
||||
|
the Derivative Works; and |
||||
|
|
||||
|
(d) If the Work includes a "NOTICE" text file as part of its |
||||
|
distribution, then any Derivative Works that You distribute must |
||||
|
include a readable copy of the attribution notices contained |
||||
|
within such NOTICE file, excluding those notices that do not |
||||
|
pertain to any part of the Derivative Works, in at least one |
||||
|
of the following places: within a NOTICE text file distributed |
||||
|
as part of the Derivative Works; within the Source form or |
||||
|
documentation, if provided along with the Derivative Works; or, |
||||
|
within a display generated by the Derivative Works, if and |
||||
|
wherever such third-party notices normally appear. The contents |
||||
|
of the NOTICE file are for informational purposes only and |
||||
|
do not modify the License. You may add Your own attribution |
||||
|
notices within Derivative Works that You distribute, alongside |
||||
|
or as an addendum to the NOTICE text from the Work, provided |
||||
|
that such additional attribution notices cannot be construed |
||||
|
as modifying the License. |
||||
|
|
||||
|
You may add Your own copyright statement to Your modifications and |
||||
|
may provide additional or different license terms and conditions |
||||
|
for use, reproduction, or distribution of Your modifications, or |
||||
|
for any such Derivative Works as a whole, provided Your use, |
||||
|
reproduction, and distribution of the Work otherwise complies with |
||||
|
the conditions stated in this License. |
||||
|
|
||||
|
5. Submission of Contributions. Unless You explicitly state otherwise, |
||||
|
any Contribution intentionally submitted for inclusion in the Work |
||||
|
by You to the Licensor shall be under the terms and conditions of |
||||
|
this License, without any additional terms or conditions. |
||||
|
Notwithstanding the above, nothing herein shall supersede or modify |
||||
|
the terms of any separate license agreement you may have executed |
||||
|
with Licensor regarding such Contributions. |
||||
|
|
||||
|
6. Trademarks. This License does not grant permission to use the trade |
||||
|
names, trademarks, service marks, or product names of the Licensor, |
||||
|
except as required for reasonable and customary use in describing the |
||||
|
origin of the Work and reproducing the content of the NOTICE file. |
||||
|
|
||||
|
7. Disclaimer of Warranty. Unless required by applicable law or |
||||
|
agreed to in writing, Licensor provides the Work (and each |
||||
|
Contributor provides its Contributions) on an "AS IS" BASIS, |
||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or |
||||
|
implied, including, without limitation, any warranties or conditions |
||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A |
||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the |
||||
|
appropriateness of using or redistributing the Work and assume any |
||||
|
risks associated with Your exercise of permissions under this License. |
||||
|
|
||||
|
8. Limitation of Liability. In no event and under no legal theory, |
||||
|
whether in tort (including negligence), contract, or otherwise, |
||||
|
unless required by applicable law (such as deliberate and grossly |
||||
|
negligent acts) or agreed to in writing, shall any Contributor be |
||||
|
liable to You for damages, including any direct, indirect, special, |
||||
|
incidental, or consequential damages of any character arising as a |
||||
|
result of this License or out of the use or inability to use the |
||||
|
Work (including but not limited to damages for loss of goodwill, |
||||
|
work stoppage, computer failure or malfunction, or any and all |
||||
|
other commercial damages or losses), even if such Contributor |
||||
|
has been advised of the possibility of such damages. |
||||
|
|
||||
|
9. Accepting Warranty or Additional Liability. While redistributing |
||||
|
the Work or Derivative Works thereof, You may choose to offer, |
||||
|
and charge a fee for, acceptance of support, warranty, indemnity, |
||||
|
or other liability obligations and/or rights consistent with this |
||||
|
License. However, in accepting such obligations, You may act only |
||||
|
on Your own behalf and on Your sole responsibility, not on behalf |
||||
|
of any other Contributor, and only if You agree to indemnify, |
||||
|
defend, and hold each Contributor harmless for any liability |
||||
|
incurred by, or claims asserted against, such Contributor by reason |
||||
|
of your accepting any such warranty or additional liability. |
||||
|
|
||||
|
END OF TERMS AND CONDITIONS |
||||
|
|
||||
|
APPENDIX: How to apply the Apache License to your work. |
||||
|
|
||||
|
To apply the Apache License to your work, attach the following |
||||
|
boilerplate notice, with the fields enclosed by brackets "[]" |
||||
|
replaced with your own identifying information. (Don't include |
||||
|
the brackets!) The text should be enclosed in the appropriate |
||||
|
comment syntax for the file format. We also recommend that a |
||||
|
file or class name and description of purpose be included on the |
||||
|
same "printed page" as the copyright notice for easier |
||||
|
identification within third-party archives. |
||||
|
|
||||
|
Copyright [yyyy] [name of copyright owner] |
||||
|
|
||||
|
Licensed under the Apache License, Version 2.0 (the "License"); |
||||
|
you may not use this file except in compliance with the License. |
||||
|
You may obtain a copy of the License at |
||||
|
|
||||
|
http://www.apache.org/licenses/LICENSE-2.0 |
||||
|
|
||||
|
Unless required by applicable law or agreed to in writing, software |
||||
|
distributed under the License is distributed on an "AS IS" BASIS, |
||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
|
See the License for the specific language governing permissions and |
||||
|
limitations under the License. |
@ -0,0 +1,12 @@ |
|||||
|
# BTree implementation for Go |
||||
|
|
||||
|
![Travis CI Build Status](https://api.travis-ci.org/google/btree.svg?branch=master) |
||||
|
|
||||
|
This package provides an in-memory B-Tree implementation for Go, useful as |
||||
|
an ordered, mutable data structure. |
||||
|
|
||||
|
The API is based off of the wonderful |
||||
|
http://godoc.org/github.com/petar/GoLLRB/llrb, and is meant to allow btree to |
||||
|
act as a drop-in replacement for gollrb trees. |
||||
|
|
||||
|
See http://godoc.org/github.com/google/btree for documentation. |
@ -0,0 +1,738 @@ |
|||||
|
// Copyright 2014 Google Inc.
|
||||
|
//
|
||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
|
// you may not use this file except in compliance with the License.
|
||||
|
// You may obtain a copy of the License at
|
||||
|
//
|
||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
//
|
||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
|
// See the License for the specific language governing permissions and
|
||||
|
// limitations under the License.
|
||||
|
|
||||
|
// Package btree implements in-memory B-Trees of arbitrary degree.
|
||||
|
//
|
||||
|
// btree implements an in-memory B-Tree for use as an ordered data structure.
|
||||
|
// It is not meant for persistent storage solutions.
|
||||
|
//
|
||||
|
// It has a flatter structure than an equivalent red-black or other binary tree,
|
||||
|
// which in some cases yields better memory usage and/or performance.
|
||||
|
// See some discussion on the matter here:
|
||||
|
// http://google-opensource.blogspot.com/2013/01/c-containers-that-save-memory-and-time.html
|
||||
|
// Note, though, that this project is in no way related to the C++ B-Tree
|
||||
|
// implementation written about there.
|
||||
|
//
|
||||
|
// Within this tree, each node contains a slice of items and a (possibly nil)
|
||||
|
// slice of children. For basic numeric values or raw structs, this can cause
|
||||
|
// efficiency differences when compared to equivalent C++ template code that
|
||||
|
// stores values in arrays within the node:
|
||||
|
// * Due to the overhead of storing values as interfaces (each
|
||||
|
// value needs to be stored as the value itself, then 2 words for the
|
||||
|
// interface pointing to that value and its type), resulting in higher
|
||||
|
// memory use.
|
||||
|
// * Since interfaces can point to values anywhere in memory, values are
|
||||
|
// most likely not stored in contiguous blocks, resulting in a higher
|
||||
|
// number of cache misses.
|
||||
|
// These issues don't tend to matter, though, when working with strings or other
|
||||
|
// heap-allocated structures, since C++-equivalent structures also must store
|
||||
|
// pointers and also distribute their values across the heap.
|
||||
|
//
|
||||
|
// This implementation is designed to be a drop-in replacement to gollrb.LLRB
|
||||
|
// trees, (http://github.com/petar/gollrb), an excellent and probably the most
|
||||
|
// widely used ordered tree implementation in the Go ecosystem currently.
|
||||
|
// Its functions, therefore, exactly mirror those of
|
||||
|
// llrb.LLRB where possible. Unlike gollrb, though, we currently don't
|
||||
|
// support storing multiple equivalent values.
|
||||
|
package btree |
||||
|
|
||||
|
import ( |
||||
|
"fmt" |
||||
|
"io" |
||||
|
"sort" |
||||
|
"strings" |
||||
|
) |
||||
|
|
||||
|
// Item represents a single object in the tree.
type Item interface {
	// Less tests whether the current item is less than the given argument.
	//
	// This must provide a strict weak ordering.
	// If !a.Less(b) && !b.Less(a), we treat this to mean a == b (i.e. we can only
	// hold one of either a or b in the tree).
	//
	// Note: equality in this tree is therefore *derived* from Less; there is
	// no separate Equal method.
	Less(than Item) bool
}
||||
|
|
||||
|
const (
	// DefaultFreeListSize is the capacity of the per-tree node free list
	// created by New.
	DefaultFreeListSize = 32
)
||||
|
|
||||
|
var (
	// nilItems and nilChildren are constant all-nil slices used as copy
	// sources when zeroing out truncated slice tails (see items.truncate
	// and children.truncate), so freed entries don't pin memory for the GC.
	nilItems    = make(items, 16)
	nilChildren = make(children, 16)
)
||||
|
|
||||
|
// FreeList represents a free list of btree nodes. By default each
// BTree has its own FreeList, but multiple BTrees can share the same
// FreeList.
// Two Btrees using the same freelist are not safe for concurrent write access.
type FreeList struct {
	// freelist holds recycled nodes up to its capacity; beyond that,
	// freed nodes are simply left for the garbage collector.
	freelist []*node
}
||||
|
|
||||
|
// NewFreeList creates a new free list.
|
||||
|
// size is the maximum size of the returned free list.
|
||||
|
func NewFreeList(size int) *FreeList { |
||||
|
return &FreeList{freelist: make([]*node, 0, size)} |
||||
|
} |
||||
|
|
||||
|
func (f *FreeList) newNode() (n *node) { |
||||
|
index := len(f.freelist) - 1 |
||||
|
if index < 0 { |
||||
|
return new(node) |
||||
|
} |
||||
|
n = f.freelist[index] |
||||
|
f.freelist[index] = nil |
||||
|
f.freelist = f.freelist[:index] |
||||
|
return |
||||
|
} |
||||
|
|
||||
|
func (f *FreeList) freeNode(n *node) { |
||||
|
if len(f.freelist) < cap(f.freelist) { |
||||
|
f.freelist = append(f.freelist, n) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
// ItemIterator allows callers of Ascend* to iterate in-order over portions of
// the tree. When this function returns false, iteration will stop and the
// associated Ascend* function will immediately return.
type ItemIterator func(i Item) bool
||||
|
|
||||
|
// New creates a new B-Tree with the given degree.
//
// New(2), for example, will create a 2-3-4 tree (each node contains 1-3 items
// and 2-4 children).
//
// The tree gets its own free list of DefaultFreeListSize nodes; use
// NewWithFreeList to share a list across trees.
func New(degree int) *BTree {
	return NewWithFreeList(degree, NewFreeList(DefaultFreeListSize))
}
||||
|
|
||||
|
// NewWithFreeList creates a new B-Tree that uses the given node free list.
|
||||
|
func NewWithFreeList(degree int, f *FreeList) *BTree { |
||||
|
if degree <= 1 { |
||||
|
panic("bad degree") |
||||
|
} |
||||
|
return &BTree{ |
||||
|
degree: degree, |
||||
|
freelist: f, |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
// items stores items in a node, kept sorted by Item.Less.
type items []Item
||||
|
|
||||
|
// insertAt inserts a value into the given index, pushing all subsequent values
|
||||
|
// forward.
|
||||
|
func (s *items) insertAt(index int, item Item) { |
||||
|
*s = append(*s, nil) |
||||
|
if index < len(*s) { |
||||
|
copy((*s)[index+1:], (*s)[index:]) |
||||
|
} |
||||
|
(*s)[index] = item |
||||
|
} |
||||
|
|
||||
|
// removeAt removes a value at a given index, pulling all subsequent values
|
||||
|
// back.
|
||||
|
func (s *items) removeAt(index int) Item { |
||||
|
item := (*s)[index] |
||||
|
copy((*s)[index:], (*s)[index+1:]) |
||||
|
(*s)[len(*s)-1] = nil |
||||
|
*s = (*s)[:len(*s)-1] |
||||
|
return item |
||||
|
} |
||||
|
|
||||
|
// pop removes and returns the last element in the list.
|
||||
|
func (s *items) pop() (out Item) { |
||||
|
index := len(*s) - 1 |
||||
|
out = (*s)[index] |
||||
|
(*s)[index] = nil |
||||
|
*s = (*s)[:index] |
||||
|
return |
||||
|
} |
||||
|
|
||||
|
// truncate truncates this instance at index so that it contains only the
|
||||
|
// first index items. index must be less than or equal to length.
|
||||
|
func (s *items) truncate(index int) { |
||||
|
var toClear items |
||||
|
*s, toClear = (*s)[:index], (*s)[index:] |
||||
|
for len(toClear) > 0 { |
||||
|
toClear = toClear[copy(toClear, nilItems):] |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
// find returns the index where the given item should be inserted into this
|
||||
|
// list. 'found' is true if the item already exists in the list at the given
|
||||
|
// index.
|
||||
|
func (s items) find(item Item) (index int, found bool) { |
||||
|
i := sort.Search(len(s), func(i int) bool { |
||||
|
return item.Less(s[i]) |
||||
|
}) |
||||
|
if i > 0 && !s[i-1].Less(item) { |
||||
|
return i - 1, true |
||||
|
} |
||||
|
return i, false |
||||
|
} |
||||
|
|
||||
|
// children stores child nodes in a node, parallel to items: child i holds
// keys less than items[i].
type children []*node
||||
|
|
||||
|
// insertAt inserts a value into the given index, pushing all subsequent values
|
||||
|
// forward.
|
||||
|
func (s *children) insertAt(index int, n *node) { |
||||
|
*s = append(*s, nil) |
||||
|
if index < len(*s) { |
||||
|
copy((*s)[index+1:], (*s)[index:]) |
||||
|
} |
||||
|
(*s)[index] = n |
||||
|
} |
||||
|
|
||||
|
// removeAt removes a value at a given index, pulling all subsequent values
|
||||
|
// back.
|
||||
|
func (s *children) removeAt(index int) *node { |
||||
|
n := (*s)[index] |
||||
|
copy((*s)[index:], (*s)[index+1:]) |
||||
|
(*s)[len(*s)-1] = nil |
||||
|
*s = (*s)[:len(*s)-1] |
||||
|
return n |
||||
|
} |
||||
|
|
||||
|
// pop removes and returns the last element in the list.
|
||||
|
func (s *children) pop() (out *node) { |
||||
|
index := len(*s) - 1 |
||||
|
out = (*s)[index] |
||||
|
(*s)[index] = nil |
||||
|
*s = (*s)[:index] |
||||
|
return |
||||
|
} |
||||
|
|
||||
|
// truncate truncates this instance at index so that it contains only the
|
||||
|
// first index children. index must be less than or equal to length.
|
||||
|
func (s *children) truncate(index int) { |
||||
|
var toClear children |
||||
|
*s, toClear = (*s)[:index], (*s)[index:] |
||||
|
for len(toClear) > 0 { |
||||
|
toClear = toClear[copy(toClear, nilChildren):] |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
// node is an internal node in a tree.
//
// It must at all times maintain the invariant that either
//   * len(children) == 0, len(items) unconstrained (a leaf), or
//   * len(children) == len(items) + 1 (an interior node).
type node struct {
	items    items    // sorted keys held in this node
	children children // child subtrees; children[i] < items[i] < children[i+1]
	t        *BTree   // owning tree, used for node allocation/freeing
}
||||
|
|
||||
|
// split splits the given node at the given index. The current node shrinks,
|
||||
|
// and this function returns the item that existed at that index and a new node
|
||||
|
// containing all items/children after it.
|
||||
|
func (n *node) split(i int) (Item, *node) { |
||||
|
item := n.items[i] |
||||
|
next := n.t.newNode() |
||||
|
next.items = append(next.items, n.items[i+1:]...) |
||||
|
n.items.truncate(i) |
||||
|
if len(n.children) > 0 { |
||||
|
next.children = append(next.children, n.children[i+1:]...) |
||||
|
n.children.truncate(i + 1) |
||||
|
} |
||||
|
return item, next |
||||
|
} |
||||
|
|
||||
|
// maybeSplitChild checks if a child should be split, and if so splits it.
|
||||
|
// Returns whether or not a split occurred.
|
||||
|
func (n *node) maybeSplitChild(i, maxItems int) bool { |
||||
|
if len(n.children[i].items) < maxItems { |
||||
|
return false |
||||
|
} |
||||
|
first := n.children[i] |
||||
|
item, second := first.split(maxItems / 2) |
||||
|
n.items.insertAt(i, item) |
||||
|
n.children.insertAt(i+1, second) |
||||
|
return true |
||||
|
} |
||||
|
|
||||
|
// insert inserts an item into the subtree rooted at this node, making sure
// no nodes in the subtree exceed maxItems items. Should an equivalent item
// be found/replaced by insert, it will be returned.
func (n *node) insert(item Item, maxItems int) Item {
	i, found := n.items.find(item)
	if found {
		// An equivalent item lives in this node: replace it in place and
		// hand the old one back.
		out := n.items[i]
		n.items[i] = item
		return out
	}
	if len(n.children) == 0 {
		// Leaf node: insert directly at the position find computed.
		n.items.insertAt(i, item)
		return nil
	}
	if n.maybeSplitChild(i, maxItems) {
		// The child was split and its median was promoted into n.items[i];
		// decide which side of the median the new item belongs on.
		inTree := n.items[i]
		switch {
		case item.Less(inTree):
			// no change, we want first split node
		case inTree.Less(item):
			i++ // we want second split node
		default:
			// The promoted median is equivalent to item: replace in place.
			out := n.items[i]
			n.items[i] = item
			return out
		}
	}
	return n.children[i].insert(item, maxItems)
}
||||
|
|
||||
|
// get finds the given key in the subtree and returns it.
|
||||
|
func (n *node) get(key Item) Item { |
||||
|
i, found := n.items.find(key) |
||||
|
if found { |
||||
|
return n.items[i] |
||||
|
} else if len(n.children) > 0 { |
||||
|
return n.children[i].get(key) |
||||
|
} |
||||
|
return nil |
||||
|
} |
||||
|
|
||||
|
// min returns the first item in the subtree.
|
||||
|
func min(n *node) Item { |
||||
|
if n == nil { |
||||
|
return nil |
||||
|
} |
||||
|
for len(n.children) > 0 { |
||||
|
n = n.children[0] |
||||
|
} |
||||
|
if len(n.items) == 0 { |
||||
|
return nil |
||||
|
} |
||||
|
return n.items[0] |
||||
|
} |
||||
|
|
||||
|
// max returns the last item in the subtree.
|
||||
|
func max(n *node) Item { |
||||
|
if n == nil { |
||||
|
return nil |
||||
|
} |
||||
|
for len(n.children) > 0 { |
||||
|
n = n.children[len(n.children)-1] |
||||
|
} |
||||
|
if len(n.items) == 0 { |
||||
|
return nil |
||||
|
} |
||||
|
return n.items[len(n.items)-1] |
||||
|
} |
||||
|
|
||||
|
// toRemove details what item to remove in a node.remove call.
type toRemove int

const (
	removeItem toRemove = iota // removes the given item
	removeMin                  // removes smallest item in the subtree
	removeMax                  // removes largest item in the subtree
)
||||
|
|
||||
|
// remove removes an item from the subtree rooted at this node and returns
// it, or nil if it was not present. typ selects between removing a specific
// item, the subtree minimum, or the subtree maximum.
func (n *node) remove(item Item, minItems int, typ toRemove) Item {
	var i int
	var found bool
	switch typ {
	case removeMax:
		if len(n.children) == 0 {
			// Leaf: the maximum is simply the last item.
			return n.items.pop()
		}
		i = len(n.items) // descend into the rightmost child
	case removeMin:
		if len(n.children) == 0 {
			// Leaf: the minimum is simply the first item.
			return n.items.removeAt(0)
		}
		i = 0 // descend into the leftmost child
	case removeItem:
		i, found = n.items.find(item)
		if len(n.children) == 0 {
			if found {
				return n.items.removeAt(i)
			}
			return nil
		}
	default:
		panic("invalid type")
	}
	// If we get to here, we have children.
	child := n.children[i]
	if len(child.items) <= minItems {
		// The child is too small to lose an item; rebalance first, then
		// redo the removal from scratch at this node.
		return n.growChildAndRemove(i, item, minItems, typ)
	}
	// Either we had enough items to begin with, or we've done some
	// merging/stealing, because we've got enough now and we're ready to return
	// stuff.
	if found {
		// The item exists at index 'i', and the child we've selected can give us a
		// predecessor, since if we've gotten here it's got > minItems items in it.
		out := n.items[i]
		// We use our special-case 'remove' call with typ=maxItem to pull the
		// predecessor of item i (the rightmost leaf of our immediate left child)
		// and set it into where we pulled the item from.
		n.items[i] = child.remove(nil, minItems, removeMax)
		return out
	}
	// Final recursive call. Once we're here, we know that the item isn't in this
	// node and that the child is big enough to remove from.
	return child.remove(item, minItems, typ)
}
||||
|
|
||||
|
// growChildAndRemove grows child 'i' to make sure it's possible to remove an
// item from it while keeping it at minItems, then calls remove to actually
// remove it.
//
// Most documentation says we have to do two sets of special casing:
//   1) item is in this node
//   2) item is in child
// In both cases, we need to handle the two subcases:
//   A) node has enough values that it can spare one
//   B) node doesn't have enough values
// For the latter, we have to check:
//   a) left sibling has node to spare
//   b) right sibling has node to spare
//   c) we must merge
// To simplify our code here, we handle cases #1 and #2 the same:
// If a node doesn't have enough items, we make sure it does (using a,b,c).
// We then simply redo our remove call, and the second time (regardless of
// whether we're in case 1 or 2), we'll have enough items and can guarantee
// that we hit case A.
func (n *node) growChildAndRemove(i int, item Item, minItems int, typ toRemove) Item {
	child := n.children[i]
	if i > 0 && len(n.children[i-1].items) > minItems {
		// Steal from left child: rotate the separator item down into child
		// and the left sibling's last item up into the separator slot.
		stealFrom := n.children[i-1]
		stolenItem := stealFrom.items.pop()
		child.items.insertAt(0, n.items[i-1])
		n.items[i-1] = stolenItem
		if len(stealFrom.children) > 0 {
			// The sibling's last child follows its item across.
			child.children.insertAt(0, stealFrom.children.pop())
		}
	} else if i < len(n.items) && len(n.children[i+1].items) > minItems {
		// steal from right child (mirror image of the case above)
		stealFrom := n.children[i+1]
		stolenItem := stealFrom.items.removeAt(0)
		child.items = append(child.items, n.items[i])
		n.items[i] = stolenItem
		if len(stealFrom.children) > 0 {
			child.children = append(child.children, stealFrom.children.removeAt(0))
		}
	} else {
		// Neither sibling can spare an item: merge with the right sibling.
		if i >= len(n.items) {
			// child is the rightmost; shift left so it has a right sibling.
			i--
			child = n.children[i]
		}
		// merge with right child
		mergeItem := n.items.removeAt(i)
		mergeChild := n.children.removeAt(i + 1)
		child.items = append(child.items, mergeItem)
		child.items = append(child.items, mergeChild.items...)
		child.children = append(child.children, mergeChild.children...)
		n.t.freeNode(mergeChild)
	}
	// Now that child i can afford to lose an item, redo the removal.
	return n.remove(item, minItems, typ)
}
||||
|
|
||||
|
// direction selects the traversal order for node.iterate.
type direction int

const (
	descend = direction(-1)
	ascend  = direction(+1)
)
||||
|
|
||||
|
// iterate provides a simple method for iterating over elements in the tree.
//
// When ascending, the 'start' should be less than 'stop' and when descending,
// the 'start' should be greater than 'stop'. Setting 'includeStart' to true
// will force the iterator to include the first item when it equals 'start',
// thus creating a "greaterOrEqual" or "lessThanEqual" rather than just a
// "greaterThan" or "lessThan" queries.
//
// The returned values are (hit, ok): 'hit' records whether the start
// boundary has been crossed yet, and ok==false means the caller should
// stop iterating (either the stop boundary or the user iterator said so).
func (n *node) iterate(dir direction, start, stop Item, includeStart bool, hit bool, iter ItemIterator) (bool, bool) {
	var ok bool
	switch dir {
	case ascend:
		for i := 0; i < len(n.items); i++ {
			// Skip items below the start boundary entirely.
			if start != nil && n.items[i].Less(start) {
				continue
			}
			// Visit the subtree to the left of items[i] first.
			if len(n.children) > 0 {
				if hit, ok = n.children[i].iterate(dir, start, stop, includeStart, hit, iter); !ok {
					return hit, false
				}
			}
			// Exclude the item equal to start when includeStart is false
			// (only the first such item; 'hit' prevents re-skipping).
			if !includeStart && !hit && start != nil && !start.Less(n.items[i]) {
				hit = true
				continue
			}
			hit = true
			// stop is exclusive on ascent: halt at the first item >= stop.
			if stop != nil && !n.items[i].Less(stop) {
				return hit, false
			}
			if !iter(n.items[i]) {
				return hit, false
			}
		}
		// Finally visit the rightmost child.
		if len(n.children) > 0 {
			if hit, ok = n.children[len(n.children)-1].iterate(dir, start, stop, includeStart, hit, iter); !ok {
				return hit, false
			}
		}
	case descend:
		for i := len(n.items) - 1; i >= 0; i-- {
			// Skip items above the start boundary; when includeStart is
			// set, the item equal to start is still visited once.
			if start != nil && !n.items[i].Less(start) {
				if !includeStart || hit || start.Less(n.items[i]) {
					continue
				}
			}
			// Visit the subtree to the right of items[i] first.
			if len(n.children) > 0 {
				if hit, ok = n.children[i+1].iterate(dir, start, stop, includeStart, hit, iter); !ok {
					return hit, false
				}
			}
			// stop is exclusive on descent: halt at the first item <= stop.
			if stop != nil && !stop.Less(n.items[i]) {
				return hit, false // continue
			}
			hit = true
			if !iter(n.items[i]) {
				return hit, false
			}
		}
		// Finally visit the leftmost child.
		if len(n.children) > 0 {
			if hit, ok = n.children[0].iterate(dir, start, stop, includeStart, hit, iter); !ok {
				return hit, false
			}
		}
	}
	return hit, true
}
||||
|
|
||||
|
// print dumps this subtree to w, one node per line, indented by depth.
// Used for testing/debugging purposes.
func (n *node) print(w io.Writer, level int) {
	fmt.Fprintf(w, "%sNODE:%v\n", strings.Repeat("  ", level), n.items)
	for _, c := range n.children {
		c.print(w, level+1)
	}
}
||||
|
|
||||
|
// BTree is an implementation of a B-Tree.
//
// BTree stores Item instances in an ordered structure, allowing easy insertion,
// removal, and iteration.
//
// Write operations are not safe for concurrent mutation by multiple
// goroutines, but Read operations are.
type BTree struct {
	degree   int       // branching factor; node item counts derive from this
	length   int       // number of items currently stored in the tree
	root     *node     // nil until the first insertion
	freelist *FreeList // recycled-node pool shared by all nodes of this tree
}
||||
|
|
||||
|
// maxItems returns the max number of items to allow per node
// (2*degree - 1, the classic B-tree bound).
func (t *BTree) maxItems() int {
	return t.degree*2 - 1
}
||||
|
|
||||
|
// minItems returns the min number of items to allow per node (ignored for the
// root node).
func (t *BTree) minItems() int {
	return t.degree - 1
}
||||
|
|
||||
|
func (t *BTree) newNode() (n *node) { |
||||
|
n = t.freelist.newNode() |
||||
|
n.t = t |
||||
|
return |
||||
|
} |
||||
|
|
||||
|
func (t *BTree) freeNode(n *node) { |
||||
|
// clear to allow GC
|
||||
|
n.items.truncate(0) |
||||
|
n.children.truncate(0) |
||||
|
n.t = nil // clear to allow GC
|
||||
|
t.freelist.freeNode(n) |
||||
|
} |
||||
|
|
||||
|
// ReplaceOrInsert adds the given item to the tree. If an item in the tree
// already equals the given one, it is removed from the tree and returned.
// Otherwise, nil is returned.
//
// nil cannot be added to the tree (will panic).
func (t *BTree) ReplaceOrInsert(item Item) Item {
	if item == nil {
		panic("nil item being added to BTree")
	}
	if t.root == nil {
		// First insertion: lazily create the root.
		t.root = t.newNode()
		t.root.items = append(t.root.items, item)
		t.length++
		return nil
	} else if len(t.root.items) >= t.maxItems() {
		// Root is full: split it around its median and grow the tree by
		// one level before descending.
		item2, second := t.root.split(t.maxItems() / 2)
		oldroot := t.root
		t.root = t.newNode()
		t.root.items = append(t.root.items, item2)
		t.root.children = append(t.root.children, oldroot, second)
	}
	out := t.root.insert(item, t.maxItems())
	if out == nil {
		// Nothing was replaced, so the tree actually grew.
		t.length++
	}
	return out
}
||||
|
|
||||
|
// Delete removes an item equal to the passed in item from the tree, returning
// it. If no such item exists, returns nil.
func (t *BTree) Delete(item Item) Item {
	return t.deleteItem(item, removeItem)
}
||||
|
|
||||
|
// DeleteMin removes the smallest item in the tree and returns it.
// If no such item exists, returns nil.
func (t *BTree) DeleteMin() Item {
	return t.deleteItem(nil, removeMin)
}
||||
|
|
||||
|
// DeleteMax removes the largest item in the tree and returns it.
// If no such item exists, returns nil.
func (t *BTree) DeleteMax() Item {
	return t.deleteItem(nil, removeMax)
}
||||
|
|
||||
|
func (t *BTree) deleteItem(item Item, typ toRemove) Item { |
||||
|
if t.root == nil || len(t.root.items) == 0 { |
||||
|
return nil |
||||
|
} |
||||
|
out := t.root.remove(item, t.minItems(), typ) |
||||
|
if len(t.root.items) == 0 && len(t.root.children) > 0 { |
||||
|
oldroot := t.root |
||||
|
t.root = t.root.children[0] |
||||
|
t.freeNode(oldroot) |
||||
|
} |
||||
|
if out != nil { |
||||
|
t.length-- |
||||
|
} |
||||
|
return out |
||||
|
} |
||||
|
|
||||
|
// AscendRange calls the iterator for every value in the tree within the range
|
||||
|
// [greaterOrEqual, lessThan), until iterator returns false.
|
||||
|
func (t *BTree) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) { |
||||
|
if t.root == nil { |
||||
|
return |
||||
|
} |
||||
|
t.root.iterate(ascend, greaterOrEqual, lessThan, true, false, iterator) |
||||
|
} |
||||
|
|
||||
|
// AscendLessThan calls the iterator for every value in the tree within the range
|
||||
|
// [first, pivot), until iterator returns false.
|
||||
|
func (t *BTree) AscendLessThan(pivot Item, iterator ItemIterator) { |
||||
|
if t.root == nil { |
||||
|
return |
||||
|
} |
||||
|
t.root.iterate(ascend, nil, pivot, false, false, iterator) |
||||
|
} |
||||
|
|
||||
|
// AscendGreaterOrEqual calls the iterator for every value in the tree within
|
||||
|
// the range [pivot, last], until iterator returns false.
|
||||
|
func (t *BTree) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) { |
||||
|
if t.root == nil { |
||||
|
return |
||||
|
} |
||||
|
t.root.iterate(ascend, pivot, nil, true, false, iterator) |
||||
|
} |
||||
|
|
||||
|
// Ascend calls the iterator for every value in the tree within the range
|
||||
|
// [first, last], until iterator returns false.
|
||||
|
func (t *BTree) Ascend(iterator ItemIterator) { |
||||
|
if t.root == nil { |
||||
|
return |
||||
|
} |
||||
|
t.root.iterate(ascend, nil, nil, false, false, iterator) |
||||
|
} |
||||
|
|
||||
|
// DescendRange calls the iterator for every value in the tree within the range
|
||||
|
// [lessOrEqual, greaterThan), until iterator returns false.
|
||||
|
func (t *BTree) DescendRange(lessOrEqual, greaterThan Item, iterator ItemIterator) { |
||||
|
if t.root == nil { |
||||
|
return |
||||
|
} |
||||
|
t.root.iterate(descend, lessOrEqual, greaterThan, true, false, iterator) |
||||
|
} |
||||
|
|
||||
|
// DescendLessOrEqual calls the iterator for every value in the tree within the range
|
||||
|
// [pivot, first], until iterator returns false.
|
||||
|
func (t *BTree) DescendLessOrEqual(pivot Item, iterator ItemIterator) { |
||||
|
if t.root == nil { |
||||
|
return |
||||
|
} |
||||
|
t.root.iterate(descend, pivot, nil, true, false, iterator) |
||||
|
} |
||||
|
|
||||
|
// DescendGreaterThan calls the iterator for every value in the tree within
|
||||
|
// the range (pivot, last], until iterator returns false.
|
||||
|
func (t *BTree) DescendGreaterThan(pivot Item, iterator ItemIterator) { |
||||
|
if t.root == nil { |
||||
|
return |
||||
|
} |
||||
|
t.root.iterate(descend, nil, pivot, false, false, iterator) |
||||
|
} |
||||
|
|
||||
|
// Descend calls the iterator for every value in the tree within the range
|
||||
|
// [last, first], until iterator returns false.
|
||||
|
func (t *BTree) Descend(iterator ItemIterator) { |
||||
|
if t.root == nil { |
||||
|
return |
||||
|
} |
||||
|
t.root.iterate(descend, nil, nil, false, false, iterator) |
||||
|
} |
||||
|
|
||||
|
// Get looks for the key item in the tree, returning it. It returns nil if
|
||||
|
// unable to find that item.
|
||||
|
func (t *BTree) Get(key Item) Item { |
||||
|
if t.root == nil { |
||||
|
return nil |
||||
|
} |
||||
|
return t.root.get(key) |
||||
|
} |
||||
|
|
||||
|
// Min returns the smallest item in the tree, or nil if the tree is empty.
func (t *BTree) Min() Item {
	return min(t.root)
}
||||
|
|
||||
|
// Max returns the largest item in the tree, or nil if the tree is empty.
func (t *BTree) Max() Item {
	return max(t.root)
}
||||
|
|
||||
|
// Has returns true if the given key is in the tree.
func (t *BTree) Has(key Item) bool {
	return t.Get(key) != nil
}
||||
|
|
||||
|
// Len returns the number of items currently in the tree.
func (t *BTree) Len() int {
	return t.length
}
||||
|
|
||||
|
// Int implements the Item interface for integers.
|
||||
|
type Int int |
||||
|
|
||||
|
// Less returns true if int(a) < int(b).
|
||||
|
func (a Int) Less(b Item) bool { |
||||
|
return a < b.(Int) |
||||
|
} |
@ -0,0 +1,76 @@ |
|||||
|
// Copyright 2014 Google Inc.
|
||||
|
//
|
||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
|
// you may not use this file except in compliance with the License.
|
||||
|
// You may obtain a copy of the License at
|
||||
|
//
|
||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
//
|
||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
|
// See the License for the specific language governing permissions and
|
||||
|
// limitations under the License.
|
||||
|
|
||||
|
// +build ignore
|
||||
|
|
||||
|
// This binary compares memory usage between btree and gollrb.
|
||||
|
package main |
||||
|
|
||||
|
import ( |
||||
|
"flag" |
||||
|
"fmt" |
||||
|
"math/rand" |
||||
|
"runtime" |
||||
|
"time" |
||||
|
|
||||
|
"github.com/google/btree" |
||||
|
"github.com/petar/GoLLRB/llrb" |
||||
|
) |
||||
|
|
||||
|
var (
	size   = flag.Int("size", 1000000, "size of the tree to build")
	degree = flag.Int("degree", 8, "degree of btree")
	gollrb = flag.Bool("llrb", false, "use llrb instead of btree")
)
||||
|
|
||||
|
// main builds a large ordered tree (btree or, with -llrb, a GoLLRB tree)
// from a random permutation and prints runtime.MemStats before the build,
// after the build, and after forcing GC, so the two implementations'
// memory footprints can be compared.
func main() {
	flag.Parse()
	vals := rand.Perm(*size)
	// t and v keep the tree and input slice reachable so the final
	// MemStats readings reflect live data, not collected garbage.
	var t, v interface{}
	v = vals
	var stats runtime.MemStats
	// Several GC cycles to settle the heap before the baseline reading.
	for i := 0; i < 10; i++ {
		runtime.GC()
	}
	fmt.Println("-------- BEFORE ----------")
	runtime.ReadMemStats(&stats)
	fmt.Printf("%+v\n", stats)
	start := time.Now()
	if *gollrb {
		tr := llrb.New()
		for _, v := range vals {
			tr.ReplaceOrInsert(llrb.Int(v))
		}
		t = tr // keep it around
	} else {
		tr := btree.New(*degree)
		for _, v := range vals {
			tr.ReplaceOrInsert(btree.Int(v))
		}
		t = tr // keep it around
	}
	fmt.Printf("%v inserts in %v\n", *size, time.Since(start))
	fmt.Println("-------- AFTER ----------")
	runtime.ReadMemStats(&stats)
	fmt.Printf("%+v\n", stats)
	for i := 0; i < 10; i++ {
		runtime.GC()
	}
	fmt.Println("-------- AFTER GC ----------")
	runtime.ReadMemStats(&stats)
	fmt.Printf("%+v\n", stats)
	// Comparing t and v defeats dead-code elimination of the live data.
	if t == v {
		fmt.Println("to make sure vals and tree aren't GC'd")
	}
}
@ -0,0 +1,563 @@ |
|||||
|
// Copyright 2014 Google Inc.
|
||||
|
//
|
||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
|
// you may not use this file except in compliance with the License.
|
||||
|
// You may obtain a copy of the License at
|
||||
|
//
|
||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
//
|
||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
|
// See the License for the specific language governing permissions and
|
||||
|
// limitations under the License.
|
||||
|
|
||||
|
package btree |
||||
|
|
||||
|
import ( |
||||
|
"flag" |
||||
|
"fmt" |
||||
|
"math/rand" |
||||
|
"reflect" |
||||
|
"sort" |
||||
|
"testing" |
||||
|
"time" |
||||
|
) |
||||
|
|
||||
|
// init seeds math/rand with the current time and prints the seed so a
// failing randomized test run can be reproduced.
func init() {
	seed := time.Now().Unix()
	fmt.Println(seed)
	rand.Seed(seed)
}
||||
|
|
||||
|
// perm returns a random permutation of n Int items in the range [0, n).
|
||||
|
func perm(n int) (out []Item) { |
||||
|
for _, v := range rand.Perm(n) { |
||||
|
out = append(out, Int(v)) |
||||
|
} |
||||
|
return |
||||
|
} |
||||
|
|
||||
|
// rang returns an ordered list of Int items in the range [0, n).
|
||||
|
func rang(n int) (out []Item) { |
||||
|
for i := 0; i < n; i++ { |
||||
|
out = append(out, Int(i)) |
||||
|
} |
||||
|
return |
||||
|
} |
||||
|
|
||||
|
// all extracts all items from a tree in order as a slice.
|
||||
|
func all(t *BTree) (out []Item) { |
||||
|
t.Ascend(func(a Item) bool { |
||||
|
out = append(out, a) |
||||
|
return true |
||||
|
}) |
||||
|
return |
||||
|
} |
||||
|
|
||||
|
// rangerev returns a reversed ordered list of Int items in the range [0, n).
|
||||
|
func rangrev(n int) (out []Item) { |
||||
|
for i := n - 1; i >= 0; i-- { |
||||
|
out = append(out, Int(i)) |
||||
|
} |
||||
|
return |
||||
|
} |
||||
|
|
||||
|
// allrev extracts all items from a tree in reverse order as a slice.
|
||||
|
func allrev(t *BTree) (out []Item) { |
||||
|
t.Descend(func(a Item) bool { |
||||
|
out = append(out, a) |
||||
|
return true |
||||
|
}) |
||||
|
return |
||||
|
} |
||||
|
|
||||
|
// btreeDegree lets `go test -degree=N` vary the branching factor used by
// most of the tests and benchmarks below.
var btreeDegree = flag.Int("degree", 32, "B-Tree degree")
||||
|
|
||||
|
func TestBTree(t *testing.T) { |
||||
|
tr := New(*btreeDegree) |
||||
|
const treeSize = 10000 |
||||
|
for i := 0; i < 10; i++ { |
||||
|
if min := tr.Min(); min != nil { |
||||
|
t.Fatalf("empty min, got %+v", min) |
||||
|
} |
||||
|
if max := tr.Max(); max != nil { |
||||
|
t.Fatalf("empty max, got %+v", max) |
||||
|
} |
||||
|
for _, item := range perm(treeSize) { |
||||
|
if x := tr.ReplaceOrInsert(item); x != nil { |
||||
|
t.Fatal("insert found item", item) |
||||
|
} |
||||
|
} |
||||
|
for _, item := range perm(treeSize) { |
||||
|
if x := tr.ReplaceOrInsert(item); x == nil { |
||||
|
t.Fatal("insert didn't find item", item) |
||||
|
} |
||||
|
} |
||||
|
if min, want := tr.Min(), Item(Int(0)); min != want { |
||||
|
t.Fatalf("min: want %+v, got %+v", want, min) |
||||
|
} |
||||
|
if max, want := tr.Max(), Item(Int(treeSize-1)); max != want { |
||||
|
t.Fatalf("max: want %+v, got %+v", want, max) |
||||
|
} |
||||
|
got := all(tr) |
||||
|
want := rang(treeSize) |
||||
|
if !reflect.DeepEqual(got, want) { |
||||
|
t.Fatalf("mismatch:\n got: %v\nwant: %v", got, want) |
||||
|
} |
||||
|
|
||||
|
gotrev := allrev(tr) |
||||
|
wantrev := rangrev(treeSize) |
||||
|
if !reflect.DeepEqual(gotrev, wantrev) { |
||||
|
t.Fatalf("mismatch:\n got: %v\nwant: %v", got, want) |
||||
|
} |
||||
|
|
||||
|
for _, item := range perm(treeSize) { |
||||
|
if x := tr.Delete(item); x == nil { |
||||
|
t.Fatalf("didn't find %v", item) |
||||
|
} |
||||
|
} |
||||
|
if got = all(tr); len(got) > 0 { |
||||
|
t.Fatalf("some left!: %v", got) |
||||
|
} |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
// ExampleBTree demonstrates the basic BTree operations. The // Output block
// below is verified by `go test`, so it doubles as a correctness check.
func ExampleBTree() {
	tr := New(*btreeDegree)
	// Insert the Int keys 0..9.
	for i := Int(0); i < 10; i++ {
		tr.ReplaceOrInsert(i)
	}
	fmt.Println("len: ", tr.Len())
	fmt.Println("get3: ", tr.Get(Int(3)))
	fmt.Println("get100: ", tr.Get(Int(100)))
	fmt.Println("del4: ", tr.Delete(Int(4)))
	fmt.Println("del100: ", tr.Delete(Int(100)))
	fmt.Println("replace5: ", tr.ReplaceOrInsert(Int(5)))
	fmt.Println("replace100:", tr.ReplaceOrInsert(Int(100)))
	fmt.Println("min: ", tr.Min())
	fmt.Println("delmin: ", tr.DeleteMin())
	fmt.Println("max: ", tr.Max())
	fmt.Println("delmax: ", tr.DeleteMax())
	fmt.Println("len: ", tr.Len())
	// Output:
	// len: 10
	// get3: 3
	// get100: <nil>
	// del4: 4
	// del100: <nil>
	// replace5: 5
	// replace100: <nil>
	// min: 0
	// delmin: 0
	// max: 100
	// delmax: 100
	// len: 8
}
||||
|
|
||||
|
func TestDeleteMin(t *testing.T) { |
||||
|
tr := New(3) |
||||
|
for _, v := range perm(100) { |
||||
|
tr.ReplaceOrInsert(v) |
||||
|
} |
||||
|
var got []Item |
||||
|
for v := tr.DeleteMin(); v != nil; v = tr.DeleteMin() { |
||||
|
got = append(got, v) |
||||
|
} |
||||
|
if want := rang(100); !reflect.DeepEqual(got, want) { |
||||
|
t.Fatalf("ascendrange:\n got: %v\nwant: %v", got, want) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func TestDeleteMax(t *testing.T) { |
||||
|
tr := New(3) |
||||
|
for _, v := range perm(100) { |
||||
|
tr.ReplaceOrInsert(v) |
||||
|
} |
||||
|
var got []Item |
||||
|
for v := tr.DeleteMax(); v != nil; v = tr.DeleteMax() { |
||||
|
got = append(got, v) |
||||
|
} |
||||
|
// Reverse our list.
|
||||
|
for i := 0; i < len(got)/2; i++ { |
||||
|
got[i], got[len(got)-i-1] = got[len(got)-i-1], got[i] |
||||
|
} |
||||
|
if want := rang(100); !reflect.DeepEqual(got, want) { |
||||
|
t.Fatalf("ascendrange:\n got: %v\nwant: %v", got, want) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func TestAscendRange(t *testing.T) { |
||||
|
tr := New(2) |
||||
|
for _, v := range perm(100) { |
||||
|
tr.ReplaceOrInsert(v) |
||||
|
} |
||||
|
var got []Item |
||||
|
tr.AscendRange(Int(40), Int(60), func(a Item) bool { |
||||
|
got = append(got, a) |
||||
|
return true |
||||
|
}) |
||||
|
if want := rang(100)[40:60]; !reflect.DeepEqual(got, want) { |
||||
|
t.Fatalf("ascendrange:\n got: %v\nwant: %v", got, want) |
||||
|
} |
||||
|
got = got[:0] |
||||
|
tr.AscendRange(Int(40), Int(60), func(a Item) bool { |
||||
|
if a.(Int) > 50 { |
||||
|
return false |
||||
|
} |
||||
|
got = append(got, a) |
||||
|
return true |
||||
|
}) |
||||
|
if want := rang(100)[40:51]; !reflect.DeepEqual(got, want) { |
||||
|
t.Fatalf("ascendrange:\n got: %v\nwant: %v", got, want) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func TestDescendRange(t *testing.T) { |
||||
|
tr := New(2) |
||||
|
for _, v := range perm(100) { |
||||
|
tr.ReplaceOrInsert(v) |
||||
|
} |
||||
|
var got []Item |
||||
|
tr.DescendRange(Int(60), Int(40), func(a Item) bool { |
||||
|
got = append(got, a) |
||||
|
return true |
||||
|
}) |
||||
|
if want := rangrev(100)[39:59]; !reflect.DeepEqual(got, want) { |
||||
|
t.Fatalf("descendrange:\n got: %v\nwant: %v", got, want) |
||||
|
} |
||||
|
got = got[:0] |
||||
|
tr.DescendRange(Int(60), Int(40), func(a Item) bool { |
||||
|
if a.(Int) < 50 { |
||||
|
return false |
||||
|
} |
||||
|
got = append(got, a) |
||||
|
return true |
||||
|
}) |
||||
|
if want := rangrev(100)[39:50]; !reflect.DeepEqual(got, want) { |
||||
|
t.Fatalf("descendrange:\n got: %v\nwant: %v", got, want) |
||||
|
} |
||||
|
} |
||||
|
func TestAscendLessThan(t *testing.T) { |
||||
|
tr := New(*btreeDegree) |
||||
|
for _, v := range perm(100) { |
||||
|
tr.ReplaceOrInsert(v) |
||||
|
} |
||||
|
var got []Item |
||||
|
tr.AscendLessThan(Int(60), func(a Item) bool { |
||||
|
got = append(got, a) |
||||
|
return true |
||||
|
}) |
||||
|
if want := rang(100)[:60]; !reflect.DeepEqual(got, want) { |
||||
|
t.Fatalf("ascendrange:\n got: %v\nwant: %v", got, want) |
||||
|
} |
||||
|
got = got[:0] |
||||
|
tr.AscendLessThan(Int(60), func(a Item) bool { |
||||
|
if a.(Int) > 50 { |
||||
|
return false |
||||
|
} |
||||
|
got = append(got, a) |
||||
|
return true |
||||
|
}) |
||||
|
if want := rang(100)[:51]; !reflect.DeepEqual(got, want) { |
||||
|
t.Fatalf("ascendrange:\n got: %v\nwant: %v", got, want) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func TestDescendLessOrEqual(t *testing.T) { |
||||
|
tr := New(*btreeDegree) |
||||
|
for _, v := range perm(100) { |
||||
|
tr.ReplaceOrInsert(v) |
||||
|
} |
||||
|
var got []Item |
||||
|
tr.DescendLessOrEqual(Int(40), func(a Item) bool { |
||||
|
got = append(got, a) |
||||
|
return true |
||||
|
}) |
||||
|
if want := rangrev(100)[59:]; !reflect.DeepEqual(got, want) { |
||||
|
t.Fatalf("descendlessorequal:\n got: %v\nwant: %v", got, want) |
||||
|
} |
||||
|
got = got[:0] |
||||
|
tr.DescendLessOrEqual(Int(60), func(a Item) bool { |
||||
|
if a.(Int) < 50 { |
||||
|
return false |
||||
|
} |
||||
|
got = append(got, a) |
||||
|
return true |
||||
|
}) |
||||
|
if want := rangrev(100)[39:50]; !reflect.DeepEqual(got, want) { |
||||
|
t.Fatalf("descendlessorequal:\n got: %v\nwant: %v", got, want) |
||||
|
} |
||||
|
} |
||||
|
func TestAscendGreaterOrEqual(t *testing.T) { |
||||
|
tr := New(*btreeDegree) |
||||
|
for _, v := range perm(100) { |
||||
|
tr.ReplaceOrInsert(v) |
||||
|
} |
||||
|
var got []Item |
||||
|
tr.AscendGreaterOrEqual(Int(40), func(a Item) bool { |
||||
|
got = append(got, a) |
||||
|
return true |
||||
|
}) |
||||
|
if want := rang(100)[40:]; !reflect.DeepEqual(got, want) { |
||||
|
t.Fatalf("ascendrange:\n got: %v\nwant: %v", got, want) |
||||
|
} |
||||
|
got = got[:0] |
||||
|
tr.AscendGreaterOrEqual(Int(40), func(a Item) bool { |
||||
|
if a.(Int) > 50 { |
||||
|
return false |
||||
|
} |
||||
|
got = append(got, a) |
||||
|
return true |
||||
|
}) |
||||
|
if want := rang(100)[40:51]; !reflect.DeepEqual(got, want) { |
||||
|
t.Fatalf("ascendrange:\n got: %v\nwant: %v", got, want) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func TestDescendGreaterThan(t *testing.T) { |
||||
|
tr := New(*btreeDegree) |
||||
|
for _, v := range perm(100) { |
||||
|
tr.ReplaceOrInsert(v) |
||||
|
} |
||||
|
var got []Item |
||||
|
tr.DescendGreaterThan(Int(40), func(a Item) bool { |
||||
|
got = append(got, a) |
||||
|
return true |
||||
|
}) |
||||
|
if want := rangrev(100)[:59]; !reflect.DeepEqual(got, want) { |
||||
|
t.Fatalf("descendgreaterthan:\n got: %v\nwant: %v", got, want) |
||||
|
} |
||||
|
got = got[:0] |
||||
|
tr.DescendGreaterThan(Int(40), func(a Item) bool { |
||||
|
if a.(Int) < 50 { |
||||
|
return false |
||||
|
} |
||||
|
got = append(got, a) |
||||
|
return true |
||||
|
}) |
||||
|
if want := rangrev(100)[:50]; !reflect.DeepEqual(got, want) { |
||||
|
t.Fatalf("descendgreaterthan:\n got: %v\nwant: %v", got, want) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
// benchmarkTreeSize is the number of items used by the Benchmark* functions.
const benchmarkTreeSize = 10000
||||
|
|
||||
|
func BenchmarkInsert(b *testing.B) { |
||||
|
b.StopTimer() |
||||
|
insertP := perm(benchmarkTreeSize) |
||||
|
b.StartTimer() |
||||
|
i := 0 |
||||
|
for i < b.N { |
||||
|
tr := New(*btreeDegree) |
||||
|
for _, item := range insertP { |
||||
|
tr.ReplaceOrInsert(item) |
||||
|
i++ |
||||
|
if i >= b.N { |
||||
|
return |
||||
|
} |
||||
|
} |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func BenchmarkDelete(b *testing.B) { |
||||
|
b.StopTimer() |
||||
|
insertP := perm(benchmarkTreeSize) |
||||
|
removeP := perm(benchmarkTreeSize) |
||||
|
b.StartTimer() |
||||
|
i := 0 |
||||
|
for i < b.N { |
||||
|
b.StopTimer() |
||||
|
tr := New(*btreeDegree) |
||||
|
for _, v := range insertP { |
||||
|
tr.ReplaceOrInsert(v) |
||||
|
} |
||||
|
b.StartTimer() |
||||
|
for _, item := range removeP { |
||||
|
tr.Delete(item) |
||||
|
i++ |
||||
|
if i >= b.N { |
||||
|
return |
||||
|
} |
||||
|
} |
||||
|
if tr.Len() > 0 { |
||||
|
panic(tr.Len()) |
||||
|
} |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func BenchmarkGet(b *testing.B) { |
||||
|
b.StopTimer() |
||||
|
insertP := perm(benchmarkTreeSize) |
||||
|
removeP := perm(benchmarkTreeSize) |
||||
|
b.StartTimer() |
||||
|
i := 0 |
||||
|
for i < b.N { |
||||
|
b.StopTimer() |
||||
|
tr := New(*btreeDegree) |
||||
|
for _, v := range insertP { |
||||
|
tr.ReplaceOrInsert(v) |
||||
|
} |
||||
|
b.StartTimer() |
||||
|
for _, item := range removeP { |
||||
|
tr.Get(item) |
||||
|
i++ |
||||
|
if i >= b.N { |
||||
|
return |
||||
|
} |
||||
|
} |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
type byInts []Item |
||||
|
|
||||
|
func (a byInts) Len() int { |
||||
|
return len(a) |
||||
|
} |
||||
|
|
||||
|
func (a byInts) Less(i, j int) bool { |
||||
|
return a[i].(Int) < a[j].(Int) |
||||
|
} |
||||
|
|
||||
|
func (a byInts) Swap(i, j int) { |
||||
|
a[i], a[j] = a[j], a[i] |
||||
|
} |
||||
|
|
||||
|
func BenchmarkAscend(b *testing.B) { |
||||
|
arr := perm(benchmarkTreeSize) |
||||
|
tr := New(*btreeDegree) |
||||
|
for _, v := range arr { |
||||
|
tr.ReplaceOrInsert(v) |
||||
|
} |
||||
|
sort.Sort(byInts(arr)) |
||||
|
b.ResetTimer() |
||||
|
for i := 0; i < b.N; i++ { |
||||
|
j := 0 |
||||
|
tr.Ascend(func(item Item) bool { |
||||
|
if item.(Int) != arr[j].(Int) { |
||||
|
b.Fatalf("mismatch: expected: %v, got %v", arr[j].(Int), item.(Int)) |
||||
|
} |
||||
|
j++ |
||||
|
return true |
||||
|
}) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func BenchmarkDescend(b *testing.B) { |
||||
|
arr := perm(benchmarkTreeSize) |
||||
|
tr := New(*btreeDegree) |
||||
|
for _, v := range arr { |
||||
|
tr.ReplaceOrInsert(v) |
||||
|
} |
||||
|
sort.Sort(byInts(arr)) |
||||
|
b.ResetTimer() |
||||
|
for i := 0; i < b.N; i++ { |
||||
|
j := len(arr) - 1 |
||||
|
tr.Descend(func(item Item) bool { |
||||
|
if item.(Int) != arr[j].(Int) { |
||||
|
b.Fatalf("mismatch: expected: %v, got %v", arr[j].(Int), item.(Int)) |
||||
|
} |
||||
|
j-- |
||||
|
return true |
||||
|
}) |
||||
|
} |
||||
|
} |
||||
|
func BenchmarkAscendRange(b *testing.B) { |
||||
|
arr := perm(benchmarkTreeSize) |
||||
|
tr := New(*btreeDegree) |
||||
|
for _, v := range arr { |
||||
|
tr.ReplaceOrInsert(v) |
||||
|
} |
||||
|
sort.Sort(byInts(arr)) |
||||
|
b.ResetTimer() |
||||
|
for i := 0; i < b.N; i++ { |
||||
|
j := 100 |
||||
|
tr.AscendRange(Int(100), arr[len(arr)-100], func(item Item) bool { |
||||
|
if item.(Int) != arr[j].(Int) { |
||||
|
b.Fatalf("mismatch: expected: %v, got %v", arr[j].(Int), item.(Int)) |
||||
|
} |
||||
|
j++ |
||||
|
return true |
||||
|
}) |
||||
|
if j != len(arr)-100 { |
||||
|
b.Fatalf("expected: %v, got %v", len(arr)-100, j) |
||||
|
} |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func BenchmarkDescendRange(b *testing.B) { |
||||
|
arr := perm(benchmarkTreeSize) |
||||
|
tr := New(*btreeDegree) |
||||
|
for _, v := range arr { |
||||
|
tr.ReplaceOrInsert(v) |
||||
|
} |
||||
|
sort.Sort(byInts(arr)) |
||||
|
b.ResetTimer() |
||||
|
for i := 0; i < b.N; i++ { |
||||
|
j := len(arr) - 100 |
||||
|
tr.DescendRange(arr[len(arr)-100], Int(100), func(item Item) bool { |
||||
|
if item.(Int) != arr[j].(Int) { |
||||
|
b.Fatalf("mismatch: expected: %v, got %v", arr[j].(Int), item.(Int)) |
||||
|
} |
||||
|
j-- |
||||
|
return true |
||||
|
}) |
||||
|
if j != 100 { |
||||
|
b.Fatalf("expected: %v, got %v", len(arr)-100, j) |
||||
|
} |
||||
|
} |
||||
|
} |
||||
|
func BenchmarkAscendGreaterOrEqual(b *testing.B) { |
||||
|
arr := perm(benchmarkTreeSize) |
||||
|
tr := New(*btreeDegree) |
||||
|
for _, v := range arr { |
||||
|
tr.ReplaceOrInsert(v) |
||||
|
} |
||||
|
sort.Sort(byInts(arr)) |
||||
|
b.ResetTimer() |
||||
|
for i := 0; i < b.N; i++ { |
||||
|
j := 100 |
||||
|
k := 0 |
||||
|
tr.AscendGreaterOrEqual(Int(100), func(item Item) bool { |
||||
|
if item.(Int) != arr[j].(Int) { |
||||
|
b.Fatalf("mismatch: expected: %v, got %v", arr[j].(Int), item.(Int)) |
||||
|
} |
||||
|
j++ |
||||
|
k++ |
||||
|
return true |
||||
|
}) |
||||
|
if j != len(arr) { |
||||
|
b.Fatalf("expected: %v, got %v", len(arr), j) |
||||
|
} |
||||
|
if k != len(arr)-100 { |
||||
|
b.Fatalf("expected: %v, got %v", len(arr)-100, k) |
||||
|
} |
||||
|
} |
||||
|
} |
||||
|
func BenchmarkDescendLessOrEqual(b *testing.B) { |
||||
|
arr := perm(benchmarkTreeSize) |
||||
|
tr := New(*btreeDegree) |
||||
|
for _, v := range arr { |
||||
|
tr.ReplaceOrInsert(v) |
||||
|
} |
||||
|
sort.Sort(byInts(arr)) |
||||
|
b.ResetTimer() |
||||
|
for i := 0; i < b.N; i++ { |
||||
|
j := len(arr) - 100 |
||||
|
k := len(arr) |
||||
|
tr.DescendLessOrEqual(arr[len(arr)-100], func(item Item) bool { |
||||
|
if item.(Int) != arr[j].(Int) { |
||||
|
b.Fatalf("mismatch: expected: %v, got %v", arr[j].(Int), item.(Int)) |
||||
|
} |
||||
|
j-- |
||||
|
k-- |
||||
|
return true |
||||
|
}) |
||||
|
if j != -1 { |
||||
|
b.Fatalf("expected: %v, got %v", -1, j) |
||||
|
} |
||||
|
if k != 99 { |
||||
|
b.Fatalf("expected: %v, got %v", 99, k) |
||||
|
} |
||||
|
} |
||||
|
} |
@ -0,0 +1,7 @@ |
|||||
|
Copyright © 2012 Greg Jones (greg.jones@gmail.com) |
||||
|
|
||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: |
||||
|
|
||||
|
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. |
||||
|
|
||||
|
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. |
@ -0,0 +1,25 @@ |
|||||
|
httpcache |
||||
|
========= |
||||
|
|
||||
|
[![Build Status](https://travis-ci.org/gregjones/httpcache.svg?branch=master)](https://travis-ci.org/gregjones/httpcache) |
||||
|
|
||||
|
A Transport for Go's http.Client that will cache responses according to the HTTP RFC |
||||
|
|
||||
|
Package httpcache provides an http.RoundTripper implementation that works as a mostly RFC-compliant cache for http responses. |
||||
|
|
||||
|
It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client and not for a shared proxy). |
||||
|
|
||||
|
**Documentation:** http://godoc.org/github.com/gregjones/httpcache |
||||
|
|
||||
|
**License:** MIT (see LICENSE.txt) |
||||
|
|
||||
|
Cache backends |
||||
|
-------------- |
||||
|
|
||||
|
- The built-in 'memory' cache stores responses in an in-memory map. |
||||
|
- [`github.com/gregjones/httpcache/diskcache`](https://github.com/gregjones/httpcache/tree/master/diskcache) provides a filesystem-backed cache using the [diskv](https://github.com/peterbourgon/diskv) library. |
||||
|
- [`github.com/gregjones/httpcache/memcache`](https://github.com/gregjones/httpcache/tree/master/memcache) provides memcache implementations, for both App Engine and 'normal' memcache servers. |
||||
|
- [`sourcegraph.com/sourcegraph/s3cache`](https://sourcegraph.com/github.com/sourcegraph/s3cache) uses Amazon S3 for storage. |
||||
|
- [`github.com/gregjones/httpcache/leveldbcache`](https://github.com/gregjones/httpcache/tree/master/leveldbcache) provides a filesystem-backed cache using [leveldb](https://github.com/syndtr/goleveldb/leveldb). |
||||
|
- [`github.com/die-net/lrucache`](https://github.com/die-net/lrucache) provides an in-memory cache that will evict least-recently used entries. |
||||
|
- [`github.com/die-net/lrucache/twotier`](https://github.com/die-net/lrucache/tree/master/twotier) allows caches to be combined, for example to use lrucache above with a persistent disk-cache. |
@ -0,0 +1,61 @@ |
|||||
|
// Package diskcache provides an implementation of httpcache.Cache that uses the diskv package
|
||||
|
// to supplement an in-memory map with persistent storage
|
||||
|
//
|
||||
|
package diskcache |
||||
|
|
||||
|
import ( |
||||
|
"bytes" |
||||
|
"crypto/md5" |
||||
|
"encoding/hex" |
||||
|
"github.com/peterbourgon/diskv" |
||||
|
"io" |
||||
|
) |
||||
|
|
||||
|
// Cache is an implementation of httpcache.Cache that supplements the
// in-memory map with persistent storage on disk via diskv.
type Cache struct {
	// d is the underlying diskv store; cache keys are mapped to filenames
	// by keyToFilename before every read/write/erase.
	d *diskv.Diskv
}
||||
|
|
||||
|
// Get returns the response corresponding to key if present
|
||||
|
func (c *Cache) Get(key string) (resp []byte, ok bool) { |
||||
|
key = keyToFilename(key) |
||||
|
resp, err := c.d.Read(key) |
||||
|
if err != nil { |
||||
|
return []byte{}, false |
||||
|
} |
||||
|
return resp, true |
||||
|
} |
||||
|
|
||||
|
// Set saves a response to the cache as key.
func (c *Cache) Set(key string, resp []byte) {
	key = keyToFilename(key)
	// NOTE(review): the WriteStream error is silently discarded — the
	// httpcache.Cache interface gives no way to report it, so a failed
	// write simply shows up as a miss on the next Get.
	c.d.WriteStream(key, bytes.NewReader(resp), true)
}
||||
|
|
||||
|
// Delete removes the response with key from the cache
|
||||
|
func (c *Cache) Delete(key string) { |
||||
|
key = keyToFilename(key) |
||||
|
c.d.Erase(key) |
||||
|
} |
||||
|
|
||||
|
// keyToFilename maps an arbitrary cache key to a filesystem-safe name by
// hex-encoding its MD5 digest (MD5 is used only for naming, not security).
func keyToFilename(key string) string {
	hasher := md5.New()
	io.WriteString(hasher, key)
	return hex.EncodeToString(hasher.Sum(nil))
}
||||
|
|
||||
|
// New returns a new Cache that will store files in basePath.
func New(basePath string) *Cache {
	return &Cache{
		d: diskv.New(diskv.Options{
			BasePath: basePath,
			// 100MB of in-memory read-through cache in front of the disk files.
			CacheSizeMax: 100 * 1024 * 1024, // 100MB
		}),
	}
}
||||
|
|
||||
|
// NewWithDiskv returns a new Cache using the provided Diskv as underlying
|
||||
|
// storage.
|
||||
|
func NewWithDiskv(d *diskv.Diskv) *Cache { |
||||
|
return &Cache{d} |
||||
|
} |
@ -0,0 +1,42 @@ |
|||||
|
package diskcache |
||||
|
|
||||
|
import ( |
||||
|
"bytes" |
||||
|
"io/ioutil" |
||||
|
"os" |
||||
|
"testing" |
||||
|
) |
||||
|
|
||||
|
func TestDiskCache(t *testing.T) { |
||||
|
tempDir, err := ioutil.TempDir("", "httpcache") |
||||
|
if err != nil { |
||||
|
t.Fatalf("TempDir: %v", err) |
||||
|
} |
||||
|
defer os.RemoveAll(tempDir) |
||||
|
|
||||
|
cache := New(tempDir) |
||||
|
|
||||
|
key := "testKey" |
||||
|
_, ok := cache.Get(key) |
||||
|
if ok { |
||||
|
t.Fatal("retrieved key before adding it") |
||||
|
} |
||||
|
|
||||
|
val := []byte("some bytes") |
||||
|
cache.Set(key, val) |
||||
|
|
||||
|
retVal, ok := cache.Get(key) |
||||
|
if !ok { |
||||
|
t.Fatal("could not retrieve an element we just added") |
||||
|
} |
||||
|
if !bytes.Equal(retVal, val) { |
||||
|
t.Fatal("retrieved a different value than what we put in") |
||||
|
} |
||||
|
|
||||
|
cache.Delete(key) |
||||
|
|
||||
|
_, ok = cache.Get(key) |
||||
|
if ok { |
||||
|
t.Fatal("deleted key still present") |
||||
|
} |
||||
|
} |
@ -0,0 +1,594 @@ |
|||||
|
// Package httpcache provides a http.RoundTripper implementation that works as a
|
||||
|
// mostly RFC-compliant cache for http responses.
|
||||
|
//
|
||||
|
// It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client
|
||||
|
// and not for a shared proxy).
|
||||
|
//
|
||||
|
package httpcache |
||||
|
|
||||
|
import ( |
||||
|
"bufio" |
||||
|
"bytes" |
||||
|
"errors" |
||||
|
"fmt" |
||||
|
"io" |
||||
|
"log" |
||||
|
"net/http" |
||||
|
"net/http/httputil" |
||||
|
"strings" |
||||
|
"sync" |
||||
|
"time" |
||||
|
) |
||||
|
|
||||
|
// Freshness states for a cached response, plus the cache-marker header.
const (
	stale = iota // cached entry exists but needs revalidation before use
	fresh        // cached entry may be served without contacting the server
	transparent  // request must go to the origin (semantics determined by getFreshness, not shown here)
	// XFromCache is the header added to responses that are returned from the cache
	XFromCache = "X-From-Cache"
)
||||
|
|
||||
|
// A Cache interface is used by the Transport to store and retrieve responses.
//
// NOTE(review): an http.RoundTripper may be called concurrently, so
// implementations should presumably be goroutine-safe — confirm against the
// Transport's usage.
type Cache interface {
	// Get returns the []byte representation of a cached response and a bool
	// set to true if the value isn't empty
	Get(key string) (responseBytes []byte, ok bool)
	// Set stores the []byte representation of a response against a key
	Set(key string, responseBytes []byte)
	// Delete removes the value associated with the key
	Delete(key string)
}
||||
|
|
||||
|
// cacheKey returns the cache key for req.
|
||||
|
func cacheKey(req *http.Request) string { |
||||
|
return req.URL.String() |
||||
|
} |
||||
|
|
||||
|
// CachedResponse returns the cached http.Response for req if present, and nil
|
||||
|
// otherwise.
|
||||
|
func CachedResponse(c Cache, req *http.Request) (resp *http.Response, err error) { |
||||
|
cachedVal, ok := c.Get(cacheKey(req)) |
||||
|
if !ok { |
||||
|
return |
||||
|
} |
||||
|
|
||||
|
b := bytes.NewBuffer(cachedVal) |
||||
|
return http.ReadResponse(bufio.NewReader(b), req) |
||||
|
} |
||||
|
|
||||
|
// MemoryCache is an implementation of Cache that stores responses in an in-memory map.
type MemoryCache struct {
	mu    sync.RWMutex      // guards items
	items map[string][]byte // cached responses keyed by cacheKey
}
||||
|
|
||||
|
// Get returns the []byte representation of the response and true if present, false if not
|
||||
|
func (c *MemoryCache) Get(key string) (resp []byte, ok bool) { |
||||
|
c.mu.RLock() |
||||
|
resp, ok = c.items[key] |
||||
|
c.mu.RUnlock() |
||||
|
return resp, ok |
||||
|
} |
||||
|
|
||||
|
// Set saves response resp to the cache with key
|
||||
|
func (c *MemoryCache) Set(key string, resp []byte) { |
||||
|
c.mu.Lock() |
||||
|
c.items[key] = resp |
||||
|
c.mu.Unlock() |
||||
|
} |
||||
|
|
||||
|
// Delete removes key from the cache
|
||||
|
func (c *MemoryCache) Delete(key string) { |
||||
|
c.mu.Lock() |
||||
|
delete(c.items, key) |
||||
|
c.mu.Unlock() |
||||
|
} |
||||
|
|
||||
|
// NewMemoryCache returns a new Cache that will store items in an in-memory map
|
||||
|
func NewMemoryCache() *MemoryCache { |
||||
|
c := &MemoryCache{items: map[string][]byte{}} |
||||
|
return c |
||||
|
} |
||||
|
|
||||
|
// onEOFReader executes a function once, on reader EOF or close.
type onEOFReader struct {
	rc io.ReadCloser // wrapped reader
	fn func()        // callback; cleared after its first invocation (see runFunc)
}
||||
|
|
||||
|
func (r *onEOFReader) Read(p []byte) (n int, err error) { |
||||
|
n, err = r.rc.Read(p) |
||||
|
if err == io.EOF { |
||||
|
r.runFunc() |
||||
|
} |
||||
|
return |
||||
|
} |
||||
|
|
||||
|
func (r *onEOFReader) Close() error { |
||||
|
err := r.rc.Close() |
||||
|
r.runFunc() |
||||
|
return err |
||||
|
} |
||||
|
|
||||
|
func (r *onEOFReader) runFunc() { |
||||
|
if fn := r.fn; fn != nil { |
||||
|
fn() |
||||
|
r.fn = nil |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
// Transport is an implementation of http.RoundTripper that will return values from a cache
// where possible (avoiding a network request) and will additionally add validators (etag/if-modified-since)
// to repeated requests allowing servers to return 304 / Not Modified
type Transport struct {
	// The RoundTripper interface actually used to make requests
	// If nil, http.DefaultTransport is used
	Transport http.RoundTripper
	// Cache stores and retrieves serialized responses.
	Cache Cache
	// If true, responses returned from the cache will be given an extra header, X-From-Cache
	MarkCachedResponses bool
	// guards modReq
	mu sync.RWMutex
	// Mapping of original request => cloned. Kept so CancelRequest can
	// cancel the validator-augmented clone actually in flight.
	modReq map[*http.Request]*http.Request
}
||||
|
|
||||
|
// NewTransport returns a new Transport with the
|
||||
|
// provided Cache implementation and MarkCachedResponses set to true
|
||||
|
func NewTransport(c Cache) *Transport { |
||||
|
return &Transport{Cache: c, MarkCachedResponses: true} |
||||
|
} |
||||
|
|
||||
|
// Client returns an *http.Client that caches responses.
|
||||
|
func (t *Transport) Client() *http.Client { |
||||
|
return &http.Client{Transport: t} |
||||
|
} |
||||
|
|
||||
|
// varyMatches will return false unless all of the cached values for the headers listed in Vary
|
||||
|
// match the new request
|
||||
|
func varyMatches(cachedResp *http.Response, req *http.Request) bool { |
||||
|
for _, header := range headerAllCommaSepValues(cachedResp.Header, "vary") { |
||||
|
header = http.CanonicalHeaderKey(header) |
||||
|
if header != "" && req.Header.Get(header) != cachedResp.Header.Get("X-Varied-"+header) { |
||||
|
return false |
||||
|
} |
||||
|
} |
||||
|
return true |
||||
|
} |
||||
|
|
||||
|
// setModReq maintains a mapping between original requests and their associated cloned requests
|
||||
|
func (t *Transport) setModReq(orig, mod *http.Request) { |
||||
|
t.mu.Lock() |
||||
|
if t.modReq == nil { |
||||
|
t.modReq = make(map[*http.Request]*http.Request) |
||||
|
} |
||||
|
if mod == nil { |
||||
|
delete(t.modReq, orig) |
||||
|
} else { |
||||
|
t.modReq[orig] = mod |
||||
|
} |
||||
|
t.mu.Unlock() |
||||
|
} |
||||
|
|
||||
|
// RoundTrip takes a Request and returns a Response
//
// If there is a fresh Response already in cache, then it will be returned without connecting to
// the server.
//
// If there is a stale Response, then any validators it contains will be set on the new request
// to give the server a chance to respond with NotModified. If this happens, then the cached Response
// will be returned.
func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error) {
	// From here on the local `cacheKey` shadows the package-level function
	// of the same name.
	cacheKey := cacheKey(req)
	// Only GET/HEAD requests without a Range header are candidates for caching.
	cacheable := (req.Method == "GET" || req.Method == "HEAD") && req.Header.Get("range") == ""
	var cachedResp *http.Response
	if cacheable {
		cachedResp, err = CachedResponse(t.Cache, req)
	} else {
		// Need to invalidate an existing value
		t.Cache.Delete(cacheKey)
	}

	// Fall back to the default transport when none is configured.
	transport := t.Transport
	if transport == nil {
		transport = http.DefaultTransport
	}

	if cacheable && cachedResp != nil && err == nil {
		if t.MarkCachedResponses {
			cachedResp.Header.Set(XFromCache, "1")
		}

		if varyMatches(cachedResp, req) {
			// Can only use cached value if the new request doesn't Vary significantly
			freshness := getFreshness(cachedResp.Header, req.Header)
			if freshness == fresh {
				return cachedResp, nil
			}

			if freshness == stale {
				var req2 *http.Request
				// Add validators if caller hasn't already done so
				etag := cachedResp.Header.Get("etag")
				if etag != "" && req.Header.Get("etag") == "" {
					req2 = cloneRequest(req)
					req2.Header.Set("if-none-match", etag)
				}
				lastModified := cachedResp.Header.Get("last-modified")
				if lastModified != "" && req.Header.Get("last-modified") == "" {
					if req2 == nil {
						req2 = cloneRequest(req)
					}
					req2.Header.Set("if-modified-since", lastModified)
				}
				if req2 != nil {
					// Associate original request with cloned request so we can refer to
					// it in CancelRequest()
					t.setModReq(req, req2)
					req = req2
					defer func() {
						// Release req/clone mapping on error
						if err != nil {
							t.setModReq(req, nil)
						}
						if resp != nil {
							// Release req/clone mapping on body close/EOF
							resp.Body = &onEOFReader{
								rc: resp.Body,
								fn: func() { t.setModReq(req, nil) },
							}
						}
					}()
				}
			}
		}

		resp, err = transport.RoundTrip(req)
		if err == nil && req.Method == "GET" && resp.StatusCode == http.StatusNotModified {
			// Replace the 304 response with the one from cache, but update with some new headers
			endToEndHeaders := getEndToEndHeaders(resp.Header)
			for _, header := range endToEndHeaders {
				cachedResp.Header[header] = resp.Header[header]
			}
			cachedResp.Status = fmt.Sprintf("%d %s", http.StatusOK, http.StatusText(http.StatusOK))
			cachedResp.StatusCode = http.StatusOK

			resp = cachedResp
		} else if (err != nil || (cachedResp != nil && resp.StatusCode >= 500)) &&
			req.Method == "GET" && canStaleOnError(cachedResp.Header, req.Header) {
			// In case of transport failure and stale-if-error activated, returns cached content
			// when available
			cachedResp.Status = fmt.Sprintf("%d %s", http.StatusOK, http.StatusText(http.StatusOK))
			cachedResp.StatusCode = http.StatusOK
			return cachedResp, nil
		} else {
			// Not a revalidation hit and not stale-if-error: drop the stale
			// cache entry on failure or non-200, and propagate errors.
			if err != nil || resp.StatusCode != http.StatusOK {
				t.Cache.Delete(cacheKey)
			}
			if err != nil {
				return nil, err
			}
		}
	} else {
		// No usable cached response: honor only-if-cached, otherwise go to
		// the network.
		reqCacheControl := parseCacheControl(req.Header)
		if _, ok := reqCacheControl["only-if-cached"]; ok {
			resp = newGatewayTimeoutResponse(req)
		} else {
			resp, err = transport.RoundTrip(req)
			if err != nil {
				return nil, err
			}
		}
	}

	if cacheable && canStore(parseCacheControl(req.Header), parseCacheControl(resp.Header)) {
		// Record the request header values named by Vary so varyMatches can
		// compare them on future lookups.
		for _, varyKey := range headerAllCommaSepValues(resp.Header, "vary") {
			varyKey = http.CanonicalHeaderKey(varyKey)
			fakeHeader := "X-Varied-" + varyKey
			reqValue := req.Header.Get(varyKey)
			if reqValue != "" {
				resp.Header.Set(fakeHeader, reqValue)
			}
		}
		// NOTE(review): `err` here shadows the outer err, so a failed dump
		// silently skips caching rather than failing the round trip.
		respBytes, err := httputil.DumpResponse(resp, true)
		if err == nil {
			t.Cache.Set(cacheKey, respBytes)
		}
	} else {
		t.Cache.Delete(cacheKey)
	}
	// All error paths returned above, so err is nil here.
	return resp, nil
}
||||
|
|
||||
|
// CancelRequest calls CancelRequest on the underlying transport if implemented, or
// logs a warning otherwise. If req was cloned to add validators (RoundTrip records
// the clone in t.modReq), the clone is the request actually in flight, so that is
// the one passed to the underlying transport's CancelRequest.
func (t *Transport) CancelRequest(req *http.Request) {
	type canceler interface {
		CancelRequest(*http.Request)
	}
	tr, ok := t.Transport.(canceler)
	if !ok {
		// Cancellation is best-effort: without CancelRequest support we can
		// only warn, not abort the in-flight request.
		log.Printf("httpcache: Client Transport of type %T doesn't support CancelRequest; Timeout not supported", t.Transport)
		return
	}

	t.mu.RLock()
	if modReq, ok := t.modReq[req]; ok {
		// req was cloned before being sent; cancel the clone and drop the
		// mapping. Go's RWMutex cannot be upgraded in place, so the read
		// lock is released before taking the write lock for the delete.
		t.mu.RUnlock()
		t.mu.Lock()
		delete(t.modReq, req)
		t.mu.Unlock()
		tr.CancelRequest(modReq)
	} else {
		t.mu.RUnlock()
		tr.CancelRequest(req)
	}
}
||||
|
|
||||
|
// ErrNoDateHeader indicates that the HTTP headers contained no Date header.
|
||||
|
var ErrNoDateHeader = errors.New("no Date header") |
||||
|
|
||||
|
// Date parses and returns the value of the Date header.
|
||||
|
func Date(respHeaders http.Header) (date time.Time, err error) { |
||||
|
dateHeader := respHeaders.Get("date") |
||||
|
if dateHeader == "" { |
||||
|
err = ErrNoDateHeader |
||||
|
return |
||||
|
} |
||||
|
|
||||
|
return time.Parse(time.RFC1123, dateHeader) |
||||
|
} |
||||
|
|
||||
|
type realClock struct{} |
||||
|
|
||||
|
func (c *realClock) since(d time.Time) time.Duration { |
||||
|
return time.Since(d) |
||||
|
} |
||||
|
|
||||
|
type timer interface { |
||||
|
since(d time.Time) time.Duration |
||||
|
} |
||||
|
|
||||
|
var clock timer = &realClock{} |
||||
|
|
||||
|
// getFreshness will return one of fresh/stale/transparent based on the cache-control
// values of the request and the response
//
// fresh indicates the response can be returned
// stale indicates that the response needs validating before it is returned
// transparent indicates the response should not be used to fulfil the request
//
// Because this is only a private cache, 'public' and 'private' in cache-control aren't
// significant. Similarly, smax-age isn't used.
func getFreshness(respHeaders, reqHeaders http.Header) (freshness int) {
	respCacheControl := parseCacheControl(respHeaders)
	reqCacheControl := parseCacheControl(reqHeaders)
	if _, ok := reqCacheControl["no-cache"]; ok {
		return transparent
	}
	if _, ok := respCacheControl["no-cache"]; ok {
		return stale
	}
	if _, ok := reqCacheControl["only-if-cached"]; ok {
		return fresh
	}

	// Age is measured from the response's Date header; without it the age
	// cannot be computed, so force revalidation.
	date, err := Date(respHeaders)
	if err != nil {
		return stale
	}
	currentAge := clock.since(date)

	var lifetime time.Duration
	var zeroDuration time.Duration

	// If a response includes both an Expires header and a max-age directive,
	// the max-age directive overrides the Expires header, even if the Expires header is more restrictive.
	if maxAge, ok := respCacheControl["max-age"]; ok {
		lifetime, err = time.ParseDuration(maxAge + "s")
		if err != nil {
			// An unparsable max-age yields a zero lifetime (always stale).
			lifetime = zeroDuration
		}
	} else {
		expiresHeader := respHeaders.Get("Expires")
		if expiresHeader != "" {
			expires, err := time.Parse(time.RFC1123, expiresHeader)
			if err != nil {
				lifetime = zeroDuration
			} else {
				// Lifetime is relative to the server's Date, not local time.
				lifetime = expires.Sub(date)
			}
		}
	}

	if maxAge, ok := reqCacheControl["max-age"]; ok {
		// the client is willing to accept a response whose age is no greater than the specified time in seconds
		lifetime, err = time.ParseDuration(maxAge + "s")
		if err != nil {
			lifetime = zeroDuration
		}
	}
	if minfresh, ok := reqCacheControl["min-fresh"]; ok {
		// the client wants a response that will still be fresh for at least the specified number of seconds.
		// Inflating the current age by min-fresh makes the final comparison stricter.
		minfreshDuration, err := time.ParseDuration(minfresh + "s")
		if err == nil {
			currentAge = time.Duration(currentAge + minfreshDuration)
		}
	}

	if maxstale, ok := reqCacheControl["max-stale"]; ok {
		// Indicates that the client is willing to accept a response that has exceeded its expiration time.
		// If max-stale is assigned a value, then the client is willing to accept a response that has exceeded
		// its expiration time by no more than the specified number of seconds.
		// If no value is assigned to max-stale, then the client is willing to accept a stale response of any age.
		//
		// Responses served only because of a max-stale value are supposed to have a Warning header added to them,
		// but that seems like a hassle, and is it actually useful? If so, then there needs to be a different
		// return-value available here.
		if maxstale == "" {
			return fresh
		}
		maxstaleDuration, err := time.ParseDuration(maxstale + "s")
		if err == nil {
			// Deflating the current age by max-stale loosens the final comparison.
			currentAge = time.Duration(currentAge - maxstaleDuration)
		}
	}

	if lifetime > currentAge {
		return fresh
	}

	return stale
}
||||
|
|
||||
|
// Returns true if either the request or the response includes the stale-if-error
// cache control extension: https://tools.ietf.org/html/rfc5861
//
// A valued stale-if-error is interpreted as a lifetime in seconds measured
// from the response's Date header; a bare stale-if-error (no value) permits
// a stale response of any age.
func canStaleOnError(respHeaders, reqHeaders http.Header) bool {
	respCacheControl := parseCacheControl(respHeaders)
	reqCacheControl := parseCacheControl(reqHeaders)

	var err error
	// A negative lifetime means the directive was not present with a value.
	lifetime := time.Duration(-1)

	if staleMaxAge, ok := respCacheControl["stale-if-error"]; ok {
		if staleMaxAge != "" {
			lifetime, err = time.ParseDuration(staleMaxAge + "s")
			if err != nil {
				// An unparsable value disables the extension entirely.
				return false
			}
		} else {
			// Valueless stale-if-error: any age is acceptable.
			return true
		}
	}
	// The request directive is evaluated the same way; when both are present
	// the request's lifetime overwrites the response's.
	if staleMaxAge, ok := reqCacheControl["stale-if-error"]; ok {
		if staleMaxAge != "" {
			lifetime, err = time.ParseDuration(staleMaxAge + "s")
			if err != nil {
				return false
			}
		} else {
			return true
		}
	}

	if lifetime >= 0 {
		date, err := Date(respHeaders)
		if err != nil {
			// Without a Date header the response's age is unknown.
			return false
		}
		currentAge := clock.since(date)
		if lifetime > currentAge {
			return true
		}
	}

	return false
}
||||
|
|
||||
|
func getEndToEndHeaders(respHeaders http.Header) []string { |
||||
|
// These headers are always hop-by-hop
|
||||
|
hopByHopHeaders := map[string]struct{}{ |
||||
|
"Connection": struct{}{}, |
||||
|
"Keep-Alive": struct{}{}, |
||||
|
"Proxy-Authenticate": struct{}{}, |
||||
|
"Proxy-Authorization": struct{}{}, |
||||
|
"Te": struct{}{}, |
||||
|
"Trailers": struct{}{}, |
||||
|
"Transfer-Encoding": struct{}{}, |
||||
|
"Upgrade": struct{}{}, |
||||
|
} |
||||
|
|
||||
|
for _, extra := range strings.Split(respHeaders.Get("connection"), ",") { |
||||
|
// any header listed in connection, if present, is also considered hop-by-hop
|
||||
|
if strings.Trim(extra, " ") != "" { |
||||
|
hopByHopHeaders[http.CanonicalHeaderKey(extra)] = struct{}{} |
||||
|
} |
||||
|
} |
||||
|
endToEndHeaders := []string{} |
||||
|
for respHeader, _ := range respHeaders { |
||||
|
if _, ok := hopByHopHeaders[respHeader]; !ok { |
||||
|
endToEndHeaders = append(endToEndHeaders, respHeader) |
||||
|
} |
||||
|
} |
||||
|
return endToEndHeaders |
||||
|
} |
||||
|
|
||||
|
func canStore(reqCacheControl, respCacheControl cacheControl) (canStore bool) { |
||||
|
if _, ok := respCacheControl["no-store"]; ok { |
||||
|
return false |
||||
|
} |
||||
|
if _, ok := reqCacheControl["no-store"]; ok { |
||||
|
return false |
||||
|
} |
||||
|
return true |
||||
|
} |
||||
|
|
||||
|
func newGatewayTimeoutResponse(req *http.Request) *http.Response { |
||||
|
var braw bytes.Buffer |
||||
|
braw.WriteString("HTTP/1.1 504 Gateway Timeout\r\n\r\n") |
||||
|
resp, err := http.ReadResponse(bufio.NewReader(&braw), req) |
||||
|
if err != nil { |
||||
|
panic(err) |
||||
|
} |
||||
|
return resp |
||||
|
} |
||||
|
|
||||
|
// cloneRequest returns a clone of the provided *http.Request.
|
||||
|
// The clone is a shallow copy of the struct and its Header map.
|
||||
|
// (This function copyright goauth2 authors: https://code.google.com/p/goauth2)
|
||||
|
func cloneRequest(r *http.Request) *http.Request { |
||||
|
// shallow copy of the struct
|
||||
|
r2 := new(http.Request) |
||||
|
*r2 = *r |
||||
|
// deep copy of the Header
|
||||
|
r2.Header = make(http.Header) |
||||
|
for k, s := range r.Header { |
||||
|
r2.Header[k] = s |
||||
|
} |
||||
|
return r2 |
||||
|
} |
||||
|
|
||||
|
type cacheControl map[string]string |
||||
|
|
||||
|
func parseCacheControl(headers http.Header) cacheControl { |
||||
|
cc := cacheControl{} |
||||
|
ccHeader := headers.Get("Cache-Control") |
||||
|
for _, part := range strings.Split(ccHeader, ",") { |
||||
|
part = strings.Trim(part, " ") |
||||
|
if part == "" { |
||||
|
continue |
||||
|
} |
||||
|
if strings.ContainsRune(part, '=') { |
||||
|
keyval := strings.Split(part, "=") |
||||
|
cc[strings.Trim(keyval[0], " ")] = strings.Trim(keyval[1], ",") |
||||
|
} else { |
||||
|
cc[part] = "" |
||||
|
} |
||||
|
} |
||||
|
return cc |
||||
|
} |
||||
|
|
||||
|
// headerAllCommaSepValues returns all comma-separated values (each
|
||||
|
// with whitespace trimmed) for header name in headers. According to
|
||||
|
// Section 4.2 of the HTTP/1.1 spec
|
||||
|
// (http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2),
|
||||
|
// values from multiple occurrences of a header should be concatenated, if
|
||||
|
// the header's value is a comma-separated list.
|
||||
|
func headerAllCommaSepValues(headers http.Header, name string) []string { |
||||
|
var vals []string |
||||
|
for _, val := range headers[http.CanonicalHeaderKey(name)] { |
||||
|
fields := strings.Split(val, ",") |
||||
|
for i, f := range fields { |
||||
|
fields[i] = strings.TrimSpace(f) |
||||
|
} |
||||
|
vals = append(vals, fields...) |
||||
|
} |
||||
|
return vals |
||||
|
} |
||||
|
|
||||
|
// NewMemoryCacheTransport returns a new Transport using the in-memory cache implementation
|
||||
|
func NewMemoryCacheTransport() *Transport { |
||||
|
c := NewMemoryCache() |
||||
|
t := NewTransport(c) |
||||
|
return t |
||||
|
} |
1208
vendor/src/github.com/gregjones/httpcache/httpcache_test.go
File diff suppressed because it is too large
View File
File diff suppressed because it is too large
View File
@ -0,0 +1,51 @@ |
|||||
|
// Package leveldbcache provides an implementation of httpcache.Cache that
|
||||
|
// uses github.com/syndtr/goleveldb/leveldb
|
||||
|
package leveldbcache |
||||
|
|
||||
|
import ( |
||||
|
"github.com/syndtr/goleveldb/leveldb" |
||||
|
) |
||||
|
|
||||
|
// Cache is an implementation of httpcache.Cache with leveldb storage
type Cache struct {
	// db is the underlying leveldb handle; all operations delegate to it.
	db *leveldb.DB
}

// Get returns the response corresponding to key if present
func (c *Cache) Get(key string) (resp []byte, ok bool) {
	var err error
	resp, err = c.db.Get([]byte(key), nil)
	if err != nil {
		// Any leveldb error (including key-not-found) is reported as a miss.
		return []byte{}, false
	}
	return resp, true
}

// Set saves a response to the cache as key
func (c *Cache) Set(key string, resp []byte) {
	// The Put error is dropped: this Set signature has no error return,
	// so a failed write silently degrades to a future cache miss.
	c.db.Put([]byte(key), resp, nil)
}

// Delete removes the response with key from the cache
func (c *Cache) Delete(key string) {
	// Error ignored for the same reason as in Set.
	c.db.Delete([]byte(key), nil)
}

// New returns a new Cache that will store leveldb in path
func New(path string) (*Cache, error) {
	cache := &Cache{}

	var err error
	cache.db, err = leveldb.OpenFile(path, nil)

	if err != nil {
		return nil, err
	}
	return cache, nil
}

// NewWithDB returns a new Cache using the provided leveldb as underlying
// storage.
func NewWithDB(db *leveldb.DB) *Cache {
	return &Cache{db}
}
@ -0,0 +1,46 @@ |
|||||
|
package leveldbcache |
||||
|
|
||||
|
import ( |
||||
|
"bytes" |
||||
|
"io/ioutil" |
||||
|
"os" |
||||
|
"path/filepath" |
||||
|
"testing" |
||||
|
) |
||||
|
|
||||
|
// TestDiskCache exercises the full Get/Set/Delete round trip against a real
// leveldb database created in a temporary directory.
func TestDiskCache(t *testing.T) {
	tempDir, err := ioutil.TempDir("", "httpcache")
	if err != nil {
		t.Fatalf("TempDir: %v", err)
	}
	defer os.RemoveAll(tempDir)

	cache, err := New(filepath.Join(tempDir, "db"))
	if err != nil {
		t.Fatalf("New leveldb,: %v", err)
	}

	key := "testKey"
	// A fresh cache must miss on an unknown key.
	_, ok := cache.Get(key)
	if ok {
		t.Fatal("retrieved key before adding it")
	}

	val := []byte("some bytes")
	cache.Set(key, val)

	// The stored value must round-trip unchanged.
	retVal, ok := cache.Get(key)
	if !ok {
		t.Fatal("could not retrieve an element we just added")
	}
	if !bytes.Equal(retVal, val) {
		t.Fatal("retrieved a different value than what we put in")
	}

	cache.Delete(key)

	// After Delete, the key must miss again.
	_, ok = cache.Get(key)
	if ok {
		t.Fatal("deleted key still present")
	}
}
@ -0,0 +1,61 @@ |
|||||
|
// +build appengine
|
||||
|
|
||||
|
// Package memcache provides an implementation of httpcache.Cache that uses App
|
||||
|
// Engine's memcache package to store cached responses.
|
||||
|
//
|
||||
|
// When not built for Google App Engine, this package will provide an
|
||||
|
// implementation that connects to a specified memcached server. See the
|
||||
|
// memcache.go file in this package for details.
|
||||
|
package memcache |
||||
|
|
||||
|
import ( |
||||
|
"appengine" |
||||
|
"appengine/memcache" |
||||
|
) |
||||
|
|
||||
|
// Cache is an implementation of httpcache.Cache that caches responses in App
// Engine's memcache.
type Cache struct {
	// The embedded context scopes every memcache call and provides logging.
	appengine.Context
}

// cacheKey modifies an httpcache key for use in memcache. Specifically, it
// prefixes keys to avoid collision with other data stored in memcache.
func cacheKey(key string) string {
	return "httpcache:" + key
}

// Get returns the response corresponding to key if present.
func (c *Cache) Get(key string) (resp []byte, ok bool) {
	item, err := memcache.Get(c.Context, cacheKey(key))
	if err != nil {
		// A plain cache miss is expected and not worth logging; any other
		// error is surfaced through the App Engine context logger.
		if err != memcache.ErrCacheMiss {
			c.Context.Errorf("error getting cached response: %v", err)
		}
		return nil, false
	}
	return item.Value, true
}

// Set saves a response to the cache as key.
func (c *Cache) Set(key string, resp []byte) {
	item := &memcache.Item{
		Key:   cacheKey(key),
		Value: resp,
	}
	// This Set signature has no error return, so failures are logged and
	// otherwise dropped; a failed write degrades to a future cache miss.
	if err := memcache.Set(c.Context, item); err != nil {
		c.Context.Errorf("error caching response: %v", err)
	}
}

// Delete removes the response with key from the cache.
func (c *Cache) Delete(key string) {
	if err := memcache.Delete(c.Context, cacheKey(key)); err != nil {
		c.Context.Errorf("error deleting cached response: %v", err)
	}
}

// New returns a new Cache for the given context.
func New(ctx appengine.Context) *Cache {
	return &Cache{ctx}
}
@ -0,0 +1,44 @@ |
|||||
|
// +build appengine
|
||||
|
|
||||
|
package memcache |
||||
|
|
||||
|
import ( |
||||
|
"bytes" |
||||
|
"testing" |
||||
|
|
||||
|
"appengine/aetest" |
||||
|
) |
||||
|
|
||||
|
// TestAppEngine exercises the Get/Set/Delete round trip against a live
// aetest (App Engine testing) context.
func TestAppEngine(t *testing.T) {
	ctx, err := aetest.NewContext(nil)
	if err != nil {
		t.Fatal(err)
	}
	defer ctx.Close()

	cache := New(ctx)

	key := "testKey"
	// A fresh cache must miss on an unknown key.
	_, ok := cache.Get(key)
	if ok {
		t.Fatal("retrieved key before adding it")
	}

	val := []byte("some bytes")
	cache.Set(key, val)

	// The stored value must round-trip unchanged.
	retVal, ok := cache.Get(key)
	if !ok {
		t.Fatal("could not retrieve an element we just added")
	}
	if !bytes.Equal(retVal, val) {
		t.Fatal("retrieved a different value than what we put in")
	}

	cache.Delete(key)

	// After Delete, the key must miss again.
	_, ok = cache.Get(key)
	if ok {
		t.Fatal("deleted key still present")
	}
}
@ -0,0 +1,60 @@ |
|||||
|
// +build !appengine
|
||||
|
|
||||
|
// Package memcache provides an implementation of httpcache.Cache that uses
|
||||
|
// gomemcache to store cached responses.
|
||||
|
//
|
||||
|
// When built for Google App Engine, this package will provide an
|
||||
|
// implementation that uses App Engine's memcache service. See the
|
||||
|
// appengine.go file in this package for details.
|
||||
|
package memcache |
||||
|
|
||||
|
import ( |
||||
|
"github.com/bradfitz/gomemcache/memcache" |
||||
|
) |
||||
|
|
||||
|
// Cache is an implementation of httpcache.Cache that caches responses in a
// memcache server.
type Cache struct {
	// The embedded client provides Get/Set/Delete against the server(s).
	*memcache.Client
}

// cacheKey modifies an httpcache key for use in memcache. Specifically, it
// prefixes keys to avoid collision with other data stored in memcache.
func cacheKey(key string) string {
	return "httpcache:" + key
}

// Get returns the response corresponding to key if present.
func (c *Cache) Get(key string) (resp []byte, ok bool) {
	item, err := c.Client.Get(cacheKey(key))
	if err != nil {
		// Every error, including a plain cache miss, is reported as "not cached".
		return nil, false
	}
	return item.Value, true
}

// Set saves a response to the cache as key.
func (c *Cache) Set(key string, resp []byte) {
	item := &memcache.Item{
		Key:   cacheKey(key),
		Value: resp,
	}
	// The Set error is dropped: this signature has no error return, so a
	// failed write silently degrades to a future cache miss.
	c.Client.Set(item)
}

// Delete removes the response with key from the cache.
func (c *Cache) Delete(key string) {
	// Error ignored for the same reason as in Set.
	c.Client.Delete(cacheKey(key))
}

// New returns a new Cache using the provided memcache server(s) with equal
// weight. If a server is listed multiple times, it gets a proportional amount
// of weight.
func New(server ...string) *Cache {
	return NewWithClient(memcache.New(server...))
}

// NewWithClient returns a new Cache with the given memcache client.
func NewWithClient(client *memcache.Client) *Cache {
	return &Cache{client}
}
@ -0,0 +1,47 @@ |
|||||
|
// +build !appengine
|
||||
|
|
||||
|
package memcache |
||||
|
|
||||
|
import ( |
||||
|
"bytes" |
||||
|
"net" |
||||
|
"testing" |
||||
|
) |
||||
|
|
||||
|
const testServer = "localhost:11211" |
||||
|
|
||||
|
// TestMemCache exercises the Get/Set/Delete round trip against a live
// memcached instance on localhost; the test is skipped when none is running.
func TestMemCache(t *testing.T) {
	conn, err := net.Dial("tcp", testServer)
	if err != nil {
		// TODO: rather than skip the test, fall back to a faked memcached server
		t.Skipf("skipping test; no server running at %s", testServer)
	}
	conn.Write([]byte("flush_all\r\n")) // flush memcache
	conn.Close()

	cache := New(testServer)

	key := "testKey"
	// A freshly flushed server must miss on an unknown key.
	_, ok := cache.Get(key)
	if ok {
		t.Fatal("retrieved key before adding it")
	}

	val := []byte("some bytes")
	cache.Set(key, val)

	// The stored value must round-trip unchanged.
	retVal, ok := cache.Get(key)
	if !ok {
		t.Fatal("could not retrieve an element we just added")
	}
	if !bytes.Equal(retVal, val) {
		t.Fatal("retrieved a different value than what we put in")
	}

	cache.Delete(key)

	// After Delete, the key must miss again.
	_, ok = cache.Get(key)
	if ok {
		t.Fatal("deleted key still present")
	}
}
@ -0,0 +1,43 @@ |
|||||
|
// Package redis provides a redis interface for http caching.
|
||||
|
package redis |
||||
|
|
||||
|
import ( |
||||
|
"github.com/garyburd/redigo/redis" |
||||
|
"github.com/gregjones/httpcache" |
||||
|
) |
||||
|
|
||||
|
// cache is an implementation of httpcache.Cache that caches responses in a
// redis server.
type cache struct {
	// The embedded connection carries every GET/SET/DEL command.
	redis.Conn
}

// cacheKey modifies an httpcache key for use in redis. Specifically, it
// prefixes keys to avoid collision with other data stored in redis.
func cacheKey(key string) string {
	return "rediscache:" + key
}

// Get returns the response corresponding to key if present.
func (c cache) Get(key string) (resp []byte, ok bool) {
	item, err := redis.Bytes(c.Do("GET", cacheKey(key)))
	if err != nil {
		// A missing key and a connection failure both surface as a miss.
		return nil, false
	}
	return item, true
}

// Set saves a response to the cache as key.
func (c cache) Set(key string, resp []byte) {
	// The command error is dropped: this Set signature has no error return,
	// so a failed write silently degrades to a future cache miss.
	c.Do("SET", cacheKey(key), resp)
}

// Delete removes the response with key from the cache.
func (c cache) Delete(key string) {
	// Error ignored for the same reason as in Set.
	c.Do("DEL", cacheKey(key))
}

// NewWithClient returns a new Cache with the given redis connection.
func NewWithClient(client redis.Conn) httpcache.Cache {
	return cache{client}
}
@ -0,0 +1,43 @@ |
|||||
|
package redis |
||||
|
|
||||
|
import ( |
||||
|
"bytes" |
||||
|
"testing" |
||||
|
|
||||
|
"github.com/garyburd/redigo/redis" |
||||
|
) |
||||
|
|
||||
|
// TestRedisCache exercises the Get/Set/Delete round trip against a live
// redis server on localhost:6379; the test is skipped when none is running.
func TestRedisCache(t *testing.T) {
	conn, err := redis.Dial("tcp", "localhost:6379")
	if err != nil {
		// TODO: rather than skip the test, fall back to a faked redis server
		t.Skipf("skipping test; no server running at localhost:6379")
	}
	conn.Do("FLUSHALL")

	cache := NewWithClient(conn)

	key := "testKey"
	// A freshly flushed server must miss on an unknown key.
	_, ok := cache.Get(key)
	if ok {
		t.Fatal("retrieved key before adding it")
	}

	val := []byte("some bytes")
	cache.Set(key, val)

	// The stored value must round-trip unchanged.
	retVal, ok := cache.Get(key)
	if !ok {
		t.Fatal("could not retrieve an element we just added")
	}
	if !bytes.Equal(retVal, val) {
		t.Fatal("retrieved a different value than what we put in")
	}

	cache.Delete(key)

	// After Delete, the key must miss again.
	_, ok = cache.Get(key)
	if ok {
		t.Fatal("deleted key still present")
	}
}
@ -0,0 +1,19 @@ |
|||||
|
Copyright (c) 2011-2012 Peter Bourgon |
||||
|
|
||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy |
||||
|
of this software and associated documentation files (the "Software"), to deal |
||||
|
in the Software without restriction, including without limitation the rights |
||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell |
||||
|
copies of the Software, and to permit persons to whom the Software is |
||||
|
furnished to do so, subject to the following conditions: |
||||
|
|
||||
|
The above copyright notice and this permission notice shall be included in |
||||
|
all copies or substantial portions of the Software. |
||||
|
|
||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, |
||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN |
||||
|
THE SOFTWARE. |
@ -0,0 +1,141 @@ |
|||||
|
# What is diskv? |
||||
|
|
||||
|
Diskv (disk-vee) is a simple, persistent key-value store written in the Go |
||||
|
language. It starts with an incredibly simple API for storing arbitrary data on |
||||
|
a filesystem by key, and builds several layers of performance-enhancing |
||||
|
abstraction on top. The end result is a conceptually simple, but highly |
||||
|
performant, disk-backed storage system. |
||||
|
|
||||
|
[![Build Status][1]][2] |
||||
|
|
||||
|
[1]: https://drone.io/github.com/peterbourgon/diskv/status.png |
||||
|
[2]: https://drone.io/github.com/peterbourgon/diskv/latest |
||||
|
|
||||
|
|
||||
|
# Installing |
||||
|
|
||||
|
Install [Go 1][3], either [from source][4] or [with a prepackaged binary][5]. |
||||
|
Then, |
||||
|
|
||||
|
```bash |
||||
|
$ go get github.com/peterbourgon/diskv |
||||
|
``` |
||||
|
|
||||
|
[3]: http://golang.org |
||||
|
[4]: http://golang.org/doc/install/source |
||||
|
[5]: http://golang.org/doc/install |
||||
|
|
||||
|
|
||||
|
# Usage |
||||
|
|
||||
|
```go |
||||
|
package main |
||||
|
|
||||
|
import ( |
||||
|
"fmt" |
||||
|
"github.com/peterbourgon/diskv" |
||||
|
) |
||||
|
|
||||
|
func main() { |
||||
|
// Simplest transform function: put all the data files into the base dir. |
||||
|
flatTransform := func(s string) []string { return []string{} } |
||||
|
|
||||
|
// Initialize a new diskv store, rooted at "my-data-dir", with a 1MB cache. |
||||
|
d := diskv.New(diskv.Options{ |
||||
|
BasePath: "my-data-dir", |
||||
|
Transform: flatTransform, |
||||
|
CacheSizeMax: 1024 * 1024, |
||||
|
}) |
||||
|
|
||||
|
// Write three bytes to the key "alpha". |
||||
|
key := "alpha" |
||||
|
d.Write(key, []byte{'1', '2', '3'}) |
||||
|
|
||||
|
// Read the value back out of the store. |
||||
|
value, _ := d.Read(key) |
||||
|
fmt.Printf("%v\n", value) |
||||
|
|
||||
|
// Erase the key+value from the store (and the disk). |
||||
|
d.Erase(key) |
||||
|
} |
||||
|
``` |
||||
|
|
||||
|
More complex examples can be found in the "examples" subdirectory. |
||||
|
|
||||
|
|
||||
|
# Theory |
||||
|
|
||||
|
## Basic idea |
||||
|
|
||||
|
At its core, diskv is a map of a key (`string`) to arbitrary data (`[]byte`). |
||||
|
The data is written to a single file on disk, with the same name as the key. |
||||
|
The key determines where that file will be stored, via a user-provided |
||||
|
`TransformFunc`, which takes a key and returns a slice (`[]string`) |
||||
|
corresponding to a path list where the key file will be stored. The simplest |
||||
|
TransformFunc, |
||||
|
|
||||
|
```go |
||||
|
func SimpleTransform (key string) []string { |
||||
|
return []string{} |
||||
|
} |
||||
|
``` |
||||
|
|
||||
|
will place all keys in the same, base directory. The design is inspired by |
||||
|
[Redis diskstore][6]; a TransformFunc which emulates the default diskstore |
||||
|
behavior is available in the content-addressable-storage example. |
||||
|
|
||||
|
[6]: http://groups.google.com/group/redis-db/browse_thread/thread/d444bc786689bde9?pli=1 |
||||
|
|
||||
|
**Note** that your TransformFunc should ensure that one valid key doesn't |
||||
|
transform to a subset of another valid key. That is, it shouldn't be possible |
||||
|
to construct valid keys that resolve to directory names. As a concrete example, |
||||
|
if your TransformFunc splits on every 3 characters, then |
||||
|
|
||||
|
```go |
||||
|
d.Write("abcabc", val) // OK: written to <base>/abc/abc/abcabc |
||||
|
d.Write("abc", val) // Error: attempted write to <base>/abc/abc, but it's a directory |
||||
|
``` |
||||
|
|
||||
|
This will be addressed in an upcoming version of diskv. |
||||
|
|
||||
|
Probably the most important design principle behind diskv is that your data is |
||||
|
always flatly available on the disk. diskv will never do anything that would |
||||
|
prevent you from accessing, copying, backing up, or otherwise interacting with |
||||
|
your data via common UNIX commandline tools. |
||||
|
|
||||
|
## Adding a cache |
||||
|
|
||||
|
An in-memory caching layer is provided by combining the BasicStore |
||||
|
functionality with a simple map structure, and keeping it up-to-date as |
||||
|
appropriate. Since the map structure in Go is not threadsafe, it's combined |
||||
|
with a RWMutex to provide safe concurrent access. |
||||
|
|
||||
|
## Adding order |
||||
|
|
||||
|
diskv is a key-value store and therefore inherently unordered. An ordering |
||||
|
system can be injected into the store by passing something which satisfies the |
||||
|
diskv.Index interface. (A default implementation, using Google's |
||||
|
[btree][7] package, is provided.) Basically, diskv keeps an ordered (by a |
||||
|
user-provided Less function) index of the keys, which can be queried. |
||||
|
|
||||
|
[7]: https://github.com/google/btree |
||||
|
|
||||
|
## Adding compression |
||||
|
|
||||
|
Something which implements the diskv.Compression interface may be passed |
||||
|
during store creation, so that all Writes and Reads are filtered through |
||||
|
a compression/decompression pipeline. Several default implementations, |
||||
|
using stdlib compression algorithms, are provided. Note that data is cached |
||||
|
compressed; the cost of decompression is borne with each Read. |
||||
|
|
||||
|
## Streaming |
||||
|
|
||||
|
diskv also now provides ReadStream and WriteStream methods, to allow very large |
||||
|
data to be handled efficiently. |
||||
|
|
||||
|
|
||||
|
# Future plans |
||||
|
|
||||
|
* Needs plenty of robust testing: huge datasets, etc... |
||||
|
* More thorough benchmarking |
||||
|
* Your suggestions for use-cases I haven't thought of |
@ -0,0 +1,253 @@ |
|||||
|
package diskv |
||||
|
|
||||
|
import ( |
||||
|
"bytes" |
||||
|
"testing" |
||||
|
"time" |
||||
|
) |
||||
|
|
||||
|
// cmpBytes reports whether a and b have the same length and contents.
// bytes.Equal replaces the hand-rolled length check + element loop; like the
// original, it treats nil and empty slices as equal (the "bytes" package is
// already imported by this file).
func cmpBytes(a, b []byte) bool {
	return bytes.Equal(a, b)
}
||||
|
|
||||
|
func (d *Diskv) isCached(key string) bool { |
||||
|
d.mu.RLock() |
||||
|
defer d.mu.RUnlock() |
||||
|
_, ok := d.cache[key] |
||||
|
return ok |
||||
|
} |
||||
|
|
||||
|
func TestWriteReadErase(t *testing.T) { |
||||
|
d := New(Options{ |
||||
|
BasePath: "test-data", |
||||
|
CacheSizeMax: 1024, |
||||
|
}) |
||||
|
defer d.EraseAll() |
||||
|
k, v := "a", []byte{'b'} |
||||
|
if err := d.Write(k, v); err != nil { |
||||
|
t.Fatalf("write: %s", err) |
||||
|
} |
||||
|
if readVal, err := d.Read(k); err != nil { |
||||
|
t.Fatalf("read: %s", err) |
||||
|
} else if bytes.Compare(v, readVal) != 0 { |
||||
|
t.Fatalf("read: expected %s, got %s", v, readVal) |
||||
|
} |
||||
|
if err := d.Erase(k); err != nil { |
||||
|
t.Fatalf("erase: %s", err) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
// TestWRECache verifies the cache lifecycle around Write/Read/Erase:
// a key must not be cached before the first Read, must become cached
// (lazily, hence the polling loop) after a Read, and must be evicted
// by Erase.
func TestWRECache(t *testing.T) {
	d := New(Options{
		BasePath:     "test-data",
		CacheSizeMax: 1024,
	})
	defer d.EraseAll()
	k, v := "xxx", []byte{' ', ' ', ' '}
	if d.isCached(k) {
		t.Fatalf("key cached before Write and Read")
	}
	if err := d.Write(k, v); err != nil {
		t.Fatalf("write: %s", err)
	}
	// Write deliberately does not populate the cache (cache-on-read policy).
	if d.isCached(k) {
		t.Fatalf("key cached before Read")
	}
	if readVal, err := d.Read(k); err != nil {
		t.Fatalf("read: %s", err)
	} else if bytes.Compare(v, readVal) != 0 {
		t.Fatalf("read: expected %s, got %s", v, readVal)
	}
	// Caching happens as a side effect of draining the read stream; poll
	// briefly (up to ~100ms) rather than assuming it is instantaneous.
	for i := 0; i < 10 && !d.isCached(k); i++ {
		time.Sleep(10 * time.Millisecond)
	}
	if !d.isCached(k) {
		t.Fatalf("key not cached after Read")
	}
	if err := d.Erase(k); err != nil {
		t.Fatalf("erase: %s", err)
	}
	// Erase busts the cache synchronously, so no polling is needed here.
	if d.isCached(k) {
		t.Fatalf("key cached after Erase")
	}
}
||||
|
|
||||
|
// TestStrings writes a set of keys and verifies that Keys(nil) yields
// exactly that set: every yielded key must be known, and every written
// key must eventually be yielded.
func TestStrings(t *testing.T) {
	d := New(Options{
		BasePath:     "test-data",
		CacheSizeMax: 1024,
	})
	defer d.EraseAll()

	// Map value tracks whether each key has been seen during iteration.
	keys := map[string]bool{"a": false, "b": false, "c": false, "d": false}
	v := []byte{'1'}
	for k := range keys {
		if err := d.Write(k, v); err != nil {
			t.Fatalf("write: %s: %s", k, err)
		}
	}

	for k := range d.Keys(nil) {
		if _, present := keys[k]; present {
			t.Logf("got: %s", k)
			keys[k] = true
		} else {
			t.Fatalf("strings() returns unknown key: %s", k)
		}
	}

	// Any key never marked true was written but not yielded.
	for k, found := range keys {
		if !found {
			t.Errorf("never got %s", k)
		}
	}
}
||||
|
|
||||
|
func TestZeroByteCache(t *testing.T) { |
||||
|
d := New(Options{ |
||||
|
BasePath: "test-data", |
||||
|
CacheSizeMax: 0, |
||||
|
}) |
||||
|
defer d.EraseAll() |
||||
|
|
||||
|
k, v := "a", []byte{'1', '2', '3'} |
||||
|
if err := d.Write(k, v); err != nil { |
||||
|
t.Fatalf("Write: %s", err) |
||||
|
} |
||||
|
|
||||
|
if d.isCached(k) { |
||||
|
t.Fatalf("key cached, expected not-cached") |
||||
|
} |
||||
|
|
||||
|
if _, err := d.Read(k); err != nil { |
||||
|
t.Fatalf("Read: %s", err) |
||||
|
} |
||||
|
|
||||
|
if d.isCached(k) { |
||||
|
t.Fatalf("key cached, expected not-cached") |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
// TestOneByteCache verifies eviction policy with a 1-byte cache: a 1-byte
// value fits and must be cached after Read, while a 2-byte value exceeds
// CacheSizeMax and must never be cached (nor evict the fitting value).
func TestOneByteCache(t *testing.T) {
	d := New(Options{
		BasePath:     "test-data",
		CacheSizeMax: 1,
	})
	defer d.EraseAll()

	k1, k2, v1, v2 := "a", "b", []byte{'1'}, []byte{'1', '2'}
	if err := d.Write(k1, v1); err != nil {
		t.Fatal(err)
	}

	if v, err := d.Read(k1); err != nil {
		t.Fatal(err)
	} else if !cmpBytes(v, v1) {
		t.Fatalf("Read: expected %s, got %s", string(v1), string(v))
	}

	// Caching is a lazy side effect of Read; poll up to ~100ms.
	for i := 0; i < 10 && !d.isCached(k1); i++ {
		time.Sleep(10 * time.Millisecond)
	}
	if !d.isCached(k1) {
		t.Fatalf("expected 1-byte value to be cached, but it wasn't")
	}

	if err := d.Write(k2, v2); err != nil {
		t.Fatal(err)
	}
	if _, err := d.Read(k2); err != nil {
		t.Fatalf("--> %s", err)
	}

	// Wait for the lazy cache pass to settle before asserting final state.
	for i := 0; i < 10 && (!d.isCached(k1) || d.isCached(k2)); i++ {
		time.Sleep(10 * time.Millisecond) // just wait for lazy-cache
	}
	if !d.isCached(k1) {
		t.Fatalf("1-byte value was uncached for no reason")
	}

	if d.isCached(k2) {
		t.Fatalf("2-byte value was cached, but cache max size is 1")
	}
}
||||
|
|
||||
|
func TestStaleCache(t *testing.T) { |
||||
|
d := New(Options{ |
||||
|
BasePath: "test-data", |
||||
|
CacheSizeMax: 1, |
||||
|
}) |
||||
|
defer d.EraseAll() |
||||
|
|
||||
|
k, first, second := "a", "first", "second" |
||||
|
if err := d.Write(k, []byte(first)); err != nil { |
||||
|
t.Fatal(err) |
||||
|
} |
||||
|
|
||||
|
v, err := d.Read(k) |
||||
|
if err != nil { |
||||
|
t.Fatal(err) |
||||
|
} |
||||
|
if string(v) != first { |
||||
|
t.Errorf("expected '%s', got '%s'", first, v) |
||||
|
} |
||||
|
|
||||
|
if err := d.Write(k, []byte(second)); err != nil { |
||||
|
t.Fatal(err) |
||||
|
} |
||||
|
|
||||
|
v, err = d.Read(k) |
||||
|
if err != nil { |
||||
|
t.Fatal(err) |
||||
|
} |
||||
|
|
||||
|
if string(v) != second { |
||||
|
t.Errorf("expected '%s', got '%s'", second, v) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func TestHas(t *testing.T) { |
||||
|
d := New(Options{ |
||||
|
BasePath: "test-data", |
||||
|
CacheSizeMax: 1024, |
||||
|
}) |
||||
|
defer d.EraseAll() |
||||
|
|
||||
|
for k, v := range map[string]string{ |
||||
|
"a": "1", |
||||
|
"foo": "2", |
||||
|
"012345": "3", |
||||
|
} { |
||||
|
d.Write(k, []byte(v)) |
||||
|
} |
||||
|
|
||||
|
d.Read("foo") // cache one of them
|
||||
|
if !d.isCached("foo") { |
||||
|
t.Errorf("'foo' didn't get cached") |
||||
|
} |
||||
|
|
||||
|
for _, tuple := range []struct { |
||||
|
key string |
||||
|
expected bool |
||||
|
}{ |
||||
|
{"a", true}, |
||||
|
{"b", false}, |
||||
|
{"foo", true}, |
||||
|
{"bar", false}, |
||||
|
{"01234", false}, |
||||
|
{"012345", true}, |
||||
|
{"0123456", false}, |
||||
|
} { |
||||
|
if expected, got := tuple.expected, d.Has(tuple.key); expected != got { |
||||
|
t.Errorf("Has(%s): expected %v, got %v", tuple.key, expected, got) |
||||
|
} |
||||
|
} |
||||
|
} |
@ -0,0 +1,64 @@ |
|||||
|
package diskv |
||||
|
|
||||
|
import ( |
||||
|
"compress/flate" |
||||
|
"compress/gzip" |
||||
|
"compress/zlib" |
||||
|
"io" |
||||
|
) |
||||
|
|
||||
|
// Compression is an interface that Diskv uses to implement compression of
// data. Writer takes a destination io.Writer and returns a WriteCloser that
// compresses all data written through it. Reader takes a source io.Reader and
// returns a ReadCloser that decompresses all data read through it. You may
// define these methods on your own type, or use one of the NewCompression
// helpers.
type Compression interface {
	// Writer wraps dst so that writes are compressed. Closing the returned
	// WriteCloser must flush any buffered compressed data to dst.
	Writer(dst io.Writer) (io.WriteCloser, error)
	// Reader wraps src so that reads yield decompressed data.
	Reader(src io.Reader) (io.ReadCloser, error)
}
||||
|
|
||||
|
// NewGzipCompression returns a Gzip-based Compression.
// It uses flate.DefaultCompression, which balances speed and ratio.
func NewGzipCompression() Compression {
	return NewGzipCompressionLevel(flate.DefaultCompression)
}

// NewGzipCompressionLevel returns a Gzip-based Compression with the given level.
// Valid levels are those accepted by gzip.NewWriterLevel (flate.BestSpeed
// through flate.BestCompression); an invalid level surfaces as an error from
// the Writer method, since gzip.NewWriterLevel defers validation to that call.
func NewGzipCompressionLevel(level int) Compression {
	return &genericCompression{
		wf: func(w io.Writer) (io.WriteCloser, error) { return gzip.NewWriterLevel(w, level) },
		rf: func(r io.Reader) (io.ReadCloser, error) { return gzip.NewReader(r) },
	}
}
||||
|
|
||||
|
// NewZlibCompression returns a Zlib-based Compression.
// It uses flate.DefaultCompression, which balances speed and ratio.
func NewZlibCompression() Compression {
	return NewZlibCompressionLevel(flate.DefaultCompression)
}

// NewZlibCompressionLevel returns a Zlib-based Compression with the given level,
// and no preset dictionary.
func NewZlibCompressionLevel(level int) Compression {
	return NewZlibCompressionLevelDict(level, nil)
}
||||
|
|
||||
|
// NewZlibCompressionLevelDict returns a Zlib-based Compression with the given
|
||||
|
// level, based on the given dictionary.
|
||||
|
func NewZlibCompressionLevelDict(level int, dict []byte) Compression { |
||||
|
return &genericCompression{ |
||||
|
func(w io.Writer) (io.WriteCloser, error) { return zlib.NewWriterLevelDict(w, level, dict) }, |
||||
|
func(r io.Reader) (io.ReadCloser, error) { return zlib.NewReaderDict(r, dict) }, |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
// genericCompression adapts a pair of wrap functions into the Compression
// interface, so each New*Compression helper only supplies two closures.
type genericCompression struct {
	wf func(w io.Writer) (io.WriteCloser, error) // wraps a writer with compression
	rf func(r io.Reader) (io.ReadCloser, error)  // wraps a reader with decompression
}

// Writer implements Compression by delegating to the configured wrap function.
func (g *genericCompression) Writer(dst io.Writer) (io.WriteCloser, error) {
	return g.wf(dst)
}

// Reader implements Compression by delegating to the configured wrap function.
func (g *genericCompression) Reader(src io.Reader) (io.ReadCloser, error) {
	return g.rf(src)
}
@ -0,0 +1,72 @@ |
|||||
|
package diskv |
||||
|
|
||||
|
import ( |
||||
|
"compress/flate" |
||||
|
"fmt" |
||||
|
"math/rand" |
||||
|
"os" |
||||
|
"testing" |
||||
|
"time" |
||||
|
) |
||||
|
|
||||
|
// Seed math/rand so each test run produces different pseudo-random payloads.
// NOTE(review): rand.Seed is deprecated as of Go 1.20, where the global
// generator is auto-seeded; kept here for older toolchains — confirm the
// minimum supported Go version before removing.
func init() {
	rand.Seed(time.Now().UnixNano())
}
||||
|
|
||||
|
func testCompressionWith(t *testing.T, c Compression, name string) { |
||||
|
d := New(Options{ |
||||
|
BasePath: "compression-test", |
||||
|
CacheSizeMax: 0, |
||||
|
Compression: c, |
||||
|
}) |
||||
|
defer d.EraseAll() |
||||
|
|
||||
|
sz := 4096 |
||||
|
val := make([]byte, sz) |
||||
|
for i := 0; i < sz; i++ { |
||||
|
val[i] = byte('a' + rand.Intn(26)) // {a-z}; should compress some
|
||||
|
} |
||||
|
|
||||
|
key := "a" |
||||
|
if err := d.Write(key, val); err != nil { |
||||
|
t.Fatalf("write failed: %s", err) |
||||
|
} |
||||
|
|
||||
|
targetFile := fmt.Sprintf("%s%c%s", d.BasePath, os.PathSeparator, key) |
||||
|
fi, err := os.Stat(targetFile) |
||||
|
if err != nil { |
||||
|
t.Fatalf("%s: %s", targetFile, err) |
||||
|
} |
||||
|
|
||||
|
if fi.Size() >= int64(sz) { |
||||
|
t.Fatalf("%s: size=%d, expected smaller", targetFile, fi.Size()) |
||||
|
} |
||||
|
t.Logf("%s compressed %d to %d", name, sz, fi.Size()) |
||||
|
|
||||
|
readVal, err := d.Read(key) |
||||
|
if len(readVal) != sz { |
||||
|
t.Fatalf("read: expected size=%d, got size=%d", sz, len(readVal)) |
||||
|
} |
||||
|
|
||||
|
for i := 0; i < sz; i++ { |
||||
|
if readVal[i] != val[i] { |
||||
|
t.Fatalf("i=%d: expected %v, got %v", i, val[i], readVal[i]) |
||||
|
} |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
// TestGzipDefault exercises the round-trip with default-level gzip.
func TestGzipDefault(t *testing.T) {
	testCompressionWith(t, NewGzipCompression(), "gzip")
}

// TestGzipBestCompression exercises gzip at its maximum compression level.
func TestGzipBestCompression(t *testing.T) {
	testCompressionWith(t, NewGzipCompressionLevel(flate.BestCompression), "gzip-max")
}

// TestGzipBestSpeed exercises gzip at its fastest compression level.
func TestGzipBestSpeed(t *testing.T) {
	testCompressionWith(t, NewGzipCompressionLevel(flate.BestSpeed), "gzip-min")
}

// TestZlib exercises the round-trip with default-level zlib.
func TestZlib(t *testing.T) {
	testCompressionWith(t, NewZlibCompression(), "zlib")
}
@ -0,0 +1,578 @@ |
|||||
|
// Diskv (disk-vee) is a simple, persistent, key-value store.
|
||||
|
// It stores all data flatly on the filesystem.
|
||||
|
|
||||
|
package diskv |
||||
|
|
||||
|
import ( |
||||
|
"bytes" |
||||
|
"errors" |
||||
|
"fmt" |
||||
|
"io" |
||||
|
"io/ioutil" |
||||
|
"os" |
||||
|
"path/filepath" |
||||
|
"strings" |
||||
|
"sync" |
||||
|
"syscall" |
||||
|
) |
||||
|
|
||||
|
// Defaults applied by New when the corresponding Options fields are zero.
const (
	defaultBasePath             = "diskv"
	defaultFilePerm os.FileMode = 0666
	defaultPathPerm os.FileMode = 0777
)

var (
	// defaultTransform stores every key flat in the base directory.
	defaultTransform = func(s string) []string { return []string{} }
	// errCanceled is returned by the walk callback when a Keys/KeysPrefix
	// cancel channel is closed; it terminates the filepath.Walk early.
	errCanceled        = errors.New("canceled")
	errEmptyKey        = errors.New("empty key")
	errBadKey          = errors.New("bad key")
	errImportDirectory = errors.New("can't import a directory")
)
||||
|
|
||||
|
// TransformFunction transforms a key into a slice of strings, with each
// element in the slice representing a directory in the file path where the
// key's entry will eventually be stored.
//
// For example, if TransformFunc transforms "abcdef" to ["ab", "cde", "f"],
// the final location of the data file will be <basedir>/ab/cde/f/abcdef
//
// An empty slice means the key is stored directly under the base directory.
type TransformFunction func(s string) []string
||||
|
|
||||
|
// Options define a set of properties that dictate Diskv behavior.
// All values are optional.
type Options struct {
	BasePath     string            // root directory for data files (default "diskv")
	Transform    TransformFunction // maps a key to its subdirectory path (default: none)
	CacheSizeMax uint64            // bytes; 0 disables the in-memory cache
	PathPerm     os.FileMode       // permissions for created directories (default 0777)
	FilePerm     os.FileMode       // permissions for created data files (default 0666)

	// Index and IndexLess must both be set for an ordered key index to be
	// maintained; New initializes the index from the keys already on disk.
	Index     Index
	IndexLess LessFunction

	Compression Compression // optional; applied to all writes and reads
}
||||
|
|
||||
|
// Diskv implements the Diskv interface. You shouldn't construct Diskv
// structures directly; instead, use the New constructor.
type Diskv struct {
	Options
	mu        sync.RWMutex      // guards cache and cacheSize
	cache     map[string][]byte // cached values, populated on Read (compressed form if Compression is set)
	cacheSize uint64            // total bytes currently held in cache; must stay <= CacheSizeMax
}
||||
|
|
||||
|
// New returns an initialized Diskv structure, ready to use.
|
||||
|
// If the path identified by baseDir already contains data,
|
||||
|
// it will be accessible, but not yet cached.
|
||||
|
func New(o Options) *Diskv { |
||||
|
if o.BasePath == "" { |
||||
|
o.BasePath = defaultBasePath |
||||
|
} |
||||
|
if o.Transform == nil { |
||||
|
o.Transform = defaultTransform |
||||
|
} |
||||
|
if o.PathPerm == 0 { |
||||
|
o.PathPerm = defaultPathPerm |
||||
|
} |
||||
|
if o.FilePerm == 0 { |
||||
|
o.FilePerm = defaultFilePerm |
||||
|
} |
||||
|
|
||||
|
d := &Diskv{ |
||||
|
Options: o, |
||||
|
cache: map[string][]byte{}, |
||||
|
cacheSize: 0, |
||||
|
} |
||||
|
|
||||
|
if d.Index != nil && d.IndexLess != nil { |
||||
|
d.Index.Initialize(d.IndexLess, d.Keys(nil)) |
||||
|
} |
||||
|
|
||||
|
return d |
||||
|
} |
||||
|
|
||||
|
// Write synchronously writes the key-value pair to disk, making it immediately
|
||||
|
// available for reads. Write relies on the filesystem to perform an eventual
|
||||
|
// sync to physical media. If you need stronger guarantees, see WriteStream.
|
||||
|
func (d *Diskv) Write(key string, val []byte) error { |
||||
|
return d.WriteStream(key, bytes.NewBuffer(val), false) |
||||
|
} |
||||
|
|
||||
|
// WriteStream writes the data represented by the io.Reader to the disk, under
|
||||
|
// the provided key. If sync is true, WriteStream performs an explicit sync on
|
||||
|
// the file as soon as it's written.
|
||||
|
//
|
||||
|
// bytes.Buffer provides io.Reader semantics for basic data types.
|
||||
|
func (d *Diskv) WriteStream(key string, r io.Reader, sync bool) error { |
||||
|
if len(key) <= 0 { |
||||
|
return errEmptyKey |
||||
|
} |
||||
|
|
||||
|
d.mu.Lock() |
||||
|
defer d.mu.Unlock() |
||||
|
|
||||
|
return d.writeStreamWithLock(key, r, sync) |
||||
|
} |
||||
|
|
||||
|
// writeStreamWithLock writes r to the data file for key. It does no input
// validation and requires the caller to hold d.mu (write lock).
//
// The write is NOT atomic: the destination file is truncated in place, so a
// crash mid-write can leave a partial value on disk.
// TODO: use atomic FS ops.
func (d *Diskv) writeStreamWithLock(key string, r io.Reader, sync bool) error {
	if err := d.ensurePathWithLock(key); err != nil {
		return fmt.Errorf("ensure path: %s", err)
	}

	mode := os.O_WRONLY | os.O_CREATE | os.O_TRUNC // overwrite if exists
	f, err := os.OpenFile(d.completeFilename(key), mode, d.FilePerm)
	if err != nil {
		return fmt.Errorf("open file: %s", err)
	}

	// Write through a compression writer when configured; otherwise through
	// a no-op closer so the f.Close below remains the single real close.
	wc := io.WriteCloser(&nopWriteCloser{f})
	if d.Compression != nil {
		wc, err = d.Compression.Writer(f)
		if err != nil {
			f.Close() // error deliberately ignored
			return fmt.Errorf("compression writer: %s", err)
		}
	}

	if _, err := io.Copy(wc, r); err != nil {
		f.Close() // error deliberately ignored
		return fmt.Errorf("i/o copy: %s", err)
	}

	// Close the compression layer first: it flushes buffered compressed
	// data into f, which must happen before any Sync or Close of f.
	if err := wc.Close(); err != nil {
		f.Close() // error deliberately ignored
		return fmt.Errorf("compression close: %s", err)
	}

	if sync {
		if err := f.Sync(); err != nil {
			f.Close() // error deliberately ignored
			return fmt.Errorf("file sync: %s", err)
		}
	}

	if err := f.Close(); err != nil {
		return fmt.Errorf("file close: %s", err)
	}

	if d.Index != nil {
		d.Index.Insert(key)
	}

	// Drop any stale cached value; the cache is repopulated on Read.
	d.bustCacheWithLock(key) // cache only on read

	return nil
}
||||
|
|
||||
|
// Import imports the source file into diskv under the destination key. If the
// destination key already exists, it's overwritten. If move is true, the
// source file is removed after a successful import.
func (d *Diskv) Import(srcFilename, dstKey string, move bool) (err error) {
	if dstKey == "" {
		return errEmptyKey
	}

	// Refuse directories up front; only regular files can be imported.
	if fi, err := os.Stat(srcFilename); err != nil {
		return err
	} else if fi.IsDir() {
		return errImportDirectory
	}

	d.mu.Lock()
	defer d.mu.Unlock()

	if err := d.ensurePathWithLock(dstKey); err != nil {
		return fmt.Errorf("ensure path: %s", err)
	}

	// Fast path for move: try an in-place rename, which is cheap when source
	// and destination share a filesystem. EXDEV (cross-device link) means we
	// must fall back to the copy path below.
	// NOTE(review): syscall.Rename / syscall.EXDEV are Unix-specific;
	// os.Rename would be the portable form — confirm target platforms.
	if move {
		if err := syscall.Rename(srcFilename, d.completeFilename(dstKey)); err == nil {
			d.bustCacheWithLock(dstKey)
			return nil
		} else if err != syscall.EXDEV {
			// If it failed due to being on a different device, fall back to copying
			return err
		}
	}

	// Copy path: stream the source file through the normal write pipeline
	// (which also applies compression and busts the cache).
	f, err := os.Open(srcFilename)
	if err != nil {
		return err
	}
	defer f.Close()
	err = d.writeStreamWithLock(dstKey, f, false)
	if err == nil && move {
		err = os.Remove(srcFilename)
	}
	return err
}
||||
|
|
||||
|
// Read reads the key and returns the value.
|
||||
|
// If the key is available in the cache, Read won't touch the disk.
|
||||
|
// If the key is not in the cache, Read will have the side-effect of
|
||||
|
// lazily caching the value.
|
||||
|
func (d *Diskv) Read(key string) ([]byte, error) { |
||||
|
rc, err := d.ReadStream(key, false) |
||||
|
if err != nil { |
||||
|
return []byte{}, err |
||||
|
} |
||||
|
defer rc.Close() |
||||
|
return ioutil.ReadAll(rc) |
||||
|
} |
||||
|
|
||||
|
// ReadStream reads the key and returns the value (data) as an io.ReadCloser.
// If the value is cached from a previous read, and direct is false,
// ReadStream will use the cached value. Otherwise, it will return a handle to
// the file on disk, and cache the data on read.
//
// If direct is true, ReadStream will lazily delete any cached value for the
// key, and return a direct handle to the file on disk.
//
// If compression is enabled, ReadStream taps into the io.Reader stream prior
// to decompression, and caches the compressed data.
func (d *Diskv) ReadStream(key string, direct bool) (io.ReadCloser, error) {
	d.mu.RLock()
	defer d.mu.RUnlock()

	if val, ok := d.cache[key]; ok {
		if !direct {
			// Serve from cache. The cached bytes are in on-disk (possibly
			// compressed) form, so run them back through the decompressor.
			buf := bytes.NewBuffer(val)
			if d.Compression != nil {
				return d.Compression.Reader(buf)
			}
			return ioutil.NopCloser(buf), nil
		}

		// direct read with a cached value: evict asynchronously, since we
		// only hold a read lock here and uncaching needs the write lock.
		go func() {
			d.mu.Lock()
			defer d.mu.Unlock()
			d.uncacheWithLock(key, uint64(len(val)))
		}()
	}

	return d.readWithRLock(key)
}
||||
|
|
||||
|
// readWithRLock ignores the cache, and returns an io.ReadCloser representing
// the decompressed data for the given key, streamed from the disk. Clients
// should acquire a read lock on the Diskv and check the cache themselves
// before calling read.
func (d *Diskv) readWithRLock(key string) (io.ReadCloser, error) {
	filename := d.completeFilename(key)

	// Stat first so a directory at the key's path is reported as not-exist
	// rather than opened as data.
	fi, err := os.Stat(filename)
	if err != nil {
		return nil, err
	}
	if fi.IsDir() {
		return nil, os.ErrNotExist
	}

	f, err := os.Open(filename)
	if err != nil {
		return nil, err
	}

	// With caching enabled, tee the raw (pre-decompression) bytes into the
	// cache via a siphon; otherwise just auto-close the file at EOF.
	var r io.Reader
	if d.CacheSizeMax > 0 {
		r = newSiphon(f, d, key)
	} else {
		r = &closingReader{f}
	}

	// Layer the decompressor (if any) on top, so callers always receive
	// plaintext regardless of how the data is stored.
	var rc = io.ReadCloser(ioutil.NopCloser(r))
	if d.Compression != nil {
		rc, err = d.Compression.Reader(r)
		if err != nil {
			return nil, err
		}
	}

	return rc, nil
}
||||
|
|
||||
|
// closingReader provides a Reader that automatically closes the
|
||||
|
// embedded ReadCloser when it reaches EOF
|
||||
|
type closingReader struct { |
||||
|
rc io.ReadCloser |
||||
|
} |
||||
|
|
||||
|
func (cr closingReader) Read(p []byte) (int, error) { |
||||
|
n, err := cr.rc.Read(p) |
||||
|
if err == io.EOF { |
||||
|
if closeErr := cr.rc.Close(); closeErr != nil { |
||||
|
return n, closeErr // close must succeed for Read to succeed
|
||||
|
} |
||||
|
} |
||||
|
return n, err |
||||
|
} |
||||
|
|
||||
|
// siphon is like a TeeReader: it copies all data read through it to an
// internal buffer, and moves that buffer to the cache at EOF.
type siphon struct {
	f   *os.File      // underlying data file; closed by Read at EOF
	d   *Diskv        // store whose cache receives the buffered data
	key string        // cache key for the buffered data
	buf *bytes.Buffer // accumulates everything read so far
}
||||
|
|
||||
|
// newSiphon constructs a siphoning reader that represents the passed file.
|
||||
|
// When a successful series of reads ends in an EOF, the siphon will write
|
||||
|
// the buffered data to Diskv's cache under the given key.
|
||||
|
func newSiphon(f *os.File, d *Diskv, key string) io.Reader { |
||||
|
return &siphon{ |
||||
|
f: f, |
||||
|
d: d, |
||||
|
key: key, |
||||
|
buf: &bytes.Buffer{}, |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
// Read implements the io.Reader interface for siphon.
// Every successfully-read chunk is mirrored into the internal buffer; at
// EOF the accumulated bytes are handed to the cache and the file is closed.
func (s *siphon) Read(p []byte) (int, error) {
	n, err := s.f.Read(p)

	if err == nil {
		return s.buf.Write(p[0:n]) // Write must succeed for Read to succeed
	}

	if err == io.EOF {
		// The whole value has been seen; cache it (best-effort — caching
		// fails harmlessly if the value doesn't fit) and close the file.
		s.d.cacheWithoutLock(s.key, s.buf.Bytes()) // cache may fail
		if closeErr := s.f.Close(); closeErr != nil {
			return n, closeErr // close must succeed for Read to succeed
		}
		return n, err
	}

	// Any other read error: propagate without caching partial data.
	return n, err
}
||||
|
|
||||
|
// Erase synchronously erases the given key from the disk and the cache.
// It returns errBadKey if the key's path is a directory, and passes through
// the os.Stat error unchanged (so callers can use os.IsNotExist) when the
// key has no data file.
func (d *Diskv) Erase(key string) error {
	d.mu.Lock()
	defer d.mu.Unlock()

	d.bustCacheWithLock(key)

	// erase from index
	if d.Index != nil {
		d.Index.Delete(key)
	}

	// erase from disk
	filename := d.completeFilename(key)
	if s, err := os.Stat(filename); err == nil {
		if s.IsDir() {
			return errBadKey
		}
		if err = os.Remove(filename); err != nil {
			return err
		}
	} else {
		// Return err as-is so caller can do os.IsNotExist(err).
		return err
	}

	// clean up and return
	d.pruneDirsWithLock(key)
	return nil
}
||||
|
|
||||
|
// EraseAll will delete all of the data from the store, both in the cache and on
|
||||
|
// the disk. Note that EraseAll doesn't distinguish diskv-related data from non-
|
||||
|
// diskv-related data. Care should be taken to always specify a diskv base
|
||||
|
// directory that is exclusively for diskv data.
|
||||
|
func (d *Diskv) EraseAll() error { |
||||
|
d.mu.Lock() |
||||
|
defer d.mu.Unlock() |
||||
|
d.cache = make(map[string][]byte) |
||||
|
d.cacheSize = 0 |
||||
|
return os.RemoveAll(d.BasePath) |
||||
|
} |
||||
|
|
||||
|
// Has returns true if the given key exists.
|
||||
|
func (d *Diskv) Has(key string) bool { |
||||
|
d.mu.Lock() |
||||
|
defer d.mu.Unlock() |
||||
|
|
||||
|
if _, ok := d.cache[key]; ok { |
||||
|
return true |
||||
|
} |
||||
|
|
||||
|
filename := d.completeFilename(key) |
||||
|
s, err := os.Stat(filename) |
||||
|
if err != nil { |
||||
|
return false |
||||
|
} |
||||
|
if s.IsDir() { |
||||
|
return false |
||||
|
} |
||||
|
|
||||
|
return true |
||||
|
} |
||||
|
|
||||
|
// Keys returns a channel that will yield every key accessible by the store,
// in undefined order. If a cancel channel is provided, closing it will
// terminate and close the keys channel. Equivalent to KeysPrefix("", cancel).
func (d *Diskv) Keys(cancel <-chan struct{}) <-chan string {
	return d.KeysPrefix("", cancel)
}
||||
|
|
||||
|
// KeysPrefix returns a channel that will yield every key accessible by the
|
||||
|
// store with the given prefix, in undefined order. If a cancel channel is
|
||||
|
// provided, closing it will terminate and close the keys channel. If the
|
||||
|
// provided prefix is the empty string, all keys will be yielded.
|
||||
|
func (d *Diskv) KeysPrefix(prefix string, cancel <-chan struct{}) <-chan string { |
||||
|
var prepath string |
||||
|
if prefix == "" { |
||||
|
prepath = d.BasePath |
||||
|
} else { |
||||
|
prepath = d.pathFor(prefix) |
||||
|
} |
||||
|
c := make(chan string) |
||||
|
go func() { |
||||
|
filepath.Walk(prepath, walker(c, prefix, cancel)) |
||||
|
close(c) |
||||
|
}() |
||||
|
return c |
||||
|
} |
||||
|
|
||||
|
// walker returns a function which satisfies the filepath.WalkFunc interface.
|
||||
|
// It sends every non-directory file entry down the channel c.
|
||||
|
func walker(c chan<- string, prefix string, cancel <-chan struct{}) filepath.WalkFunc { |
||||
|
return func(path string, info os.FileInfo, err error) error { |
||||
|
if err != nil { |
||||
|
return err |
||||
|
} |
||||
|
|
||||
|
if info.IsDir() || !strings.HasPrefix(info.Name(), prefix) { |
||||
|
return nil // "pass"
|
||||
|
} |
||||
|
|
||||
|
select { |
||||
|
case c <- info.Name(): |
||||
|
case <-cancel: |
||||
|
return errCanceled |
||||
|
} |
||||
|
|
||||
|
return nil |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
// pathFor returns the absolute path for location on the filesystem where the
|
||||
|
// data for the given key will be stored.
|
||||
|
func (d *Diskv) pathFor(key string) string { |
||||
|
return filepath.Join(d.BasePath, filepath.Join(d.Transform(key)...)) |
||||
|
} |
||||
|
|
||||
|
// ensurePathWithLock is a helper function that generates all necessary
// directories on the filesystem for the given key. The caller must hold
// d.mu; MkdirAll is a no-op if the path already exists.
func (d *Diskv) ensurePathWithLock(key string) error {
	return os.MkdirAll(d.pathFor(key), d.PathPerm)
}
||||
|
|
||||
|
// completeFilename returns the absolute path to the file for the given key:
// the transformed directory path with the full key as the file name.
func (d *Diskv) completeFilename(key string) string {
	return filepath.Join(d.pathFor(key), key)
}
||||
|
|
||||
|
// cacheWithLock attempts to cache the given key-value pair in the store's
// cache. It can fail if the value is larger than the cache's maximum size.
// The caller must hold d.mu (write lock).
func (d *Diskv) cacheWithLock(key string, val []byte) error {
	valueSize := uint64(len(val))
	if err := d.ensureCacheSpaceWithLock(valueSize); err != nil {
		return fmt.Errorf("%s; not caching", err)
	}

	// be very strict about memory guarantees
	// (ensureCacheSpaceWithLock must have evicted enough; this panic marks
	// a broken invariant, i.e. a programmer bug, not a runtime condition)
	if (d.cacheSize + valueSize) > d.CacheSizeMax {
		panic(fmt.Sprintf("failed to make room for value (%d/%d)", valueSize, d.CacheSizeMax))
	}

	d.cache[key] = val
	d.cacheSize += valueSize
	return nil
}
||||
|
|
||||
|
// cacheWithoutLock acquires the store's (write) mutex and calls cacheWithLock.
// It exists for callers (e.g. the siphon) that run outside any held lock.
func (d *Diskv) cacheWithoutLock(key string, val []byte) error {
	d.mu.Lock()
	defer d.mu.Unlock()
	return d.cacheWithLock(key, val)
}
||||
|
|
||||
|
func (d *Diskv) bustCacheWithLock(key string) { |
||||
|
if val, ok := d.cache[key]; ok { |
||||
|
d.uncacheWithLock(key, uint64(len(val))) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
// uncacheWithLock removes key from the cache and deducts sz bytes from the
// accounted cache size. The caller must hold d.mu (write lock) and must pass
// the size that was originally charged for the entry, or the accounting drifts.
func (d *Diskv) uncacheWithLock(key string, sz uint64) {
	d.cacheSize -= sz
	delete(d.cache, key)
}
||||
|
|
||||
|
// pruneDirsWithLock deletes empty directories in the path walk leading to the
// key k. Typically this function is called after an Erase is made. It works
// from the deepest transform directory upward, stopping at the first
// directory that still contains entries. The caller must hold d.mu.
func (d *Diskv) pruneDirsWithLock(key string) error {
	pathlist := d.Transform(key)
	for i := range pathlist {
		// i=0 is the deepest directory; each iteration drops one level.
		dir := filepath.Join(d.BasePath, filepath.Join(pathlist[:len(pathlist)-i]...))

		// thanks to Steven Blenkinsop for this snippet
		switch fi, err := os.Stat(dir); true {
		case err != nil:
			return err
		case !fi.IsDir():
			panic(fmt.Sprintf("corrupt dirstate at %s", dir))
		}

		nlinks, err := filepath.Glob(filepath.Join(dir, "*"))
		if err != nil {
			return err
		} else if len(nlinks) > 0 {
			return nil // directory not empty (files or subdirs) -- do not prune
		}
		if err = os.Remove(dir); err != nil {
			return err
		}
	}

	return nil
}
||||
|
|
||||
|
// ensureCacheSpaceWithLock deletes entries from the cache in arbitrary order
// until the cache has at least valueSize bytes available. It returns an error
// (without evicting) when the value can never fit. The caller must hold d.mu.
func (d *Diskv) ensureCacheSpaceWithLock(valueSize uint64) error {
	if valueSize > d.CacheSizeMax {
		return fmt.Errorf("value size (%d bytes) too large for cache (%d bytes)", valueSize, d.CacheSizeMax)
	}

	safe := func() bool { return (d.cacheSize + valueSize) <= d.CacheSizeMax }

	// Map iteration order is random, so eviction is effectively arbitrary.
	for key, val := range d.cache {
		if safe() {
			break
		}

		d.uncacheWithLock(key, uint64(len(val)))
	}

	// With the early size check above, evicting everything must free enough
	// room; failing here indicates broken size accounting (programmer bug).
	if !safe() {
		panic(fmt.Sprintf("%d bytes still won't fit in the cache! (max %d bytes)", valueSize, d.CacheSizeMax))
	}

	return nil
}
||||
|
|
||||
|
// nopWriteCloser wraps an io.Writer and provides a no-op Close method to
|
||||
|
// satisfy the io.WriteCloser interface.
|
||||
|
type nopWriteCloser struct { |
||||
|
io.Writer |
||||
|
} |
||||
|
|
||||
|
func (wc *nopWriteCloser) Write(p []byte) (int, error) { return wc.Writer.Write(p) } |
||||
|
func (wc *nopWriteCloser) Close() error { return nil } |
@ -0,0 +1,63 @@ |
|||||
|
package main |
||||
|
|
||||
|
import ( |
||||
|
"crypto/md5" |
||||
|
"fmt" |
||||
|
"io" |
||||
|
|
||||
|
"github.com/peterbourgon/diskv" |
||||
|
) |
||||
|
|
||||
|
const transformBlockSize = 2 // grouping of chars per directory depth

// blockTransform splits s into consecutive transformBlockSize-character
// chunks, one per directory level. Any trailing characters that don't fill
// a whole chunk are dropped from the path (they remain in the file name).
func blockTransform(s string) []string {
	blockCount := len(s) / transformBlockSize
	result := make([]string, 0, blockCount)
	for block := 0; block < blockCount; block++ {
		start := block * transformBlockSize
		result = append(result, s[start:start+transformBlockSize])
	}
	return result
}
||||
|
|
||||
|
func main() { |
||||
|
d := diskv.New(diskv.Options{ |
||||
|
BasePath: "data", |
||||
|
Transform: blockTransform, |
||||
|
CacheSizeMax: 1024 * 1024, // 1MB
|
||||
|
}) |
||||
|
|
||||
|
for _, valueStr := range []string{ |
||||
|
"I am the very model of a modern Major-General", |
||||
|
"I've information vegetable, animal, and mineral", |
||||
|
"I know the kings of England, and I quote the fights historical", |
||||
|
"From Marathon to Waterloo, in order categorical", |
||||
|
"I'm very well acquainted, too, with matters mathematical", |
||||
|
"I understand equations, both the simple and quadratical", |
||||
|
"About binomial theorem I'm teeming with a lot o' news", |
||||
|
"With many cheerful facts about the square of the hypotenuse", |
||||
|
} { |
||||
|
d.Write(md5sum(valueStr), []byte(valueStr)) |
||||
|
} |
||||
|
|
||||
|
var keyCount int |
||||
|
for key := range d.Keys(nil) { |
||||
|
val, err := d.Read(key) |
||||
|
if err != nil { |
||||
|
panic(fmt.Sprintf("key %s had no value", key)) |
||||
|
} |
||||
|
fmt.Printf("%s: %s\n", key, val) |
||||
|
keyCount++ |
||||
|
} |
||||
|
fmt.Printf("%d total keys\n", keyCount) |
||||
|
|
||||
|
// d.EraseAll() // leave it commented out to see how data is kept on disk
|
||||
|
} |
||||
|
|
||||
|
// md5sum returns the hex-encoded MD5 digest of s.
func md5sum(s string) string {
	return fmt.Sprintf("%x", md5.Sum([]byte(s)))
}
@ -0,0 +1,30 @@ |
|||||
|
package main |
||||
|
|
||||
|
import ( |
||||
|
"fmt" |
||||
|
|
||||
|
"github.com/peterbourgon/diskv" |
||||
|
) |
||||
|
|
||||
|
func main() { |
||||
|
d := diskv.New(diskv.Options{ |
||||
|
BasePath: "my-diskv-data-directory", |
||||
|
Transform: func(s string) []string { return []string{} }, |
||||
|
CacheSizeMax: 1024 * 1024, // 1MB
|
||||
|
}) |
||||
|
|
||||
|
key := "alpha" |
||||
|
if err := d.Write(key, []byte{'1', '2', '3'}); err != nil { |
||||
|
panic(err) |
||||
|
} |
||||
|
|
||||
|
value, err := d.Read(key) |
||||
|
if err != nil { |
||||
|
panic(err) |
||||
|
} |
||||
|
fmt.Printf("%v\n", value) |
||||
|
|
||||
|
if err := d.Erase(key); err != nil { |
||||
|
panic(err) |
||||
|
} |
||||
|
} |
@ -0,0 +1,76 @@ |
|||||
|
package diskv_test |
||||
|
|
||||
|
import ( |
||||
|
"bytes" |
||||
|
"io/ioutil" |
||||
|
"os" |
||||
|
|
||||
|
"github.com/peterbourgon/diskv" |
||||
|
|
||||
|
"testing" |
||||
|
) |
||||
|
|
||||
|
func TestImportMove(t *testing.T) { |
||||
|
b := []byte(`0123456789`) |
||||
|
f, err := ioutil.TempFile("", "temp-test") |
||||
|
if err != nil { |
||||
|
t.Fatal(err) |
||||
|
} |
||||
|
if _, err := f.Write(b); err != nil { |
||||
|
t.Fatal(err) |
||||
|
} |
||||
|
f.Close() |
||||
|
|
||||
|
d := diskv.New(diskv.Options{ |
||||
|
BasePath: "test-import-move", |
||||
|
}) |
||||
|
defer d.EraseAll() |
||||
|
|
||||
|
key := "key" |
||||
|
|
||||
|
if err := d.Write(key, []byte(`TBD`)); err != nil { |
||||
|
t.Fatal(err) |
||||
|
} |
||||
|
|
||||
|
if err := d.Import(f.Name(), key, true); err != nil { |
||||
|
t.Fatal(err) |
||||
|
} |
||||
|
|
||||
|
if _, err := os.Stat(f.Name()); err == nil || !os.IsNotExist(err) { |
||||
|
t.Errorf("expected temp file to be gone, but err = %v", err) |
||||
|
} |
||||
|
|
||||
|
if !d.Has(key) { |
||||
|
t.Errorf("%q not present", key) |
||||
|
} |
||||
|
|
||||
|
if buf, err := d.Read(key); err != nil || bytes.Compare(b, buf) != 0 { |
||||
|
t.Errorf("want %q, have %q (err = %v)", string(b), string(buf), err) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func TestImportCopy(t *testing.T) { |
||||
|
b := []byte(`¡åéîòü!`) |
||||
|
|
||||
|
f, err := ioutil.TempFile("", "temp-test") |
||||
|
if err != nil { |
||||
|
t.Fatal(err) |
||||
|
} |
||||
|
if _, err := f.Write(b); err != nil { |
||||
|
t.Fatal(err) |
||||
|
} |
||||
|
f.Close() |
||||
|
|
||||
|
d := diskv.New(diskv.Options{ |
||||
|
BasePath: "test-import-copy", |
||||
|
}) |
||||
|
defer d.EraseAll() |
||||
|
|
||||
|
if err := d.Import(f.Name(), "key", false); err != nil { |
||||
|
t.Fatal(err) |
||||
|
} |
||||
|
|
||||
|
if _, err := os.Stat(f.Name()); err != nil { |
||||
|
t.Errorf("expected temp file to remain, but got err = %v", err) |
||||
|
} |
||||
|
} |
@ -0,0 +1,115 @@ |
|||||
|
package diskv |
||||
|
|
||||
|
import ( |
||||
|
"sync" |
||||
|
|
||||
|
"github.com/google/btree" |
||||
|
) |
||||
|
|
||||
|
// Index is a generic interface for things that can
|
||||
|
// provide an ordered list of keys.
|
||||
|
type Index interface { |
||||
|
Initialize(less LessFunction, keys <-chan string) |
||||
|
Insert(key string) |
||||
|
Delete(key string) |
||||
|
Keys(from string, n int) []string |
||||
|
} |
||||
|
|
||||
|
// LessFunction is used to initialize an Index of keys in a specific order.
|
||||
|
type LessFunction func(string, string) bool |
||||
|
|
||||
|
// btreeString is a custom data type that satisfies the BTree Less interface,
|
||||
|
// making the strings it wraps sortable by the BTree package.
|
||||
|
type btreeString struct { |
||||
|
s string |
||||
|
l LessFunction |
||||
|
} |
||||
|
|
||||
|
// Less satisfies the BTree.Less interface using the btreeString's LessFunction.
|
||||
|
func (s btreeString) Less(i btree.Item) bool { |
||||
|
return s.l(s.s, i.(btreeString).s) |
||||
|
} |
||||
|
|
||||
|
// BTreeIndex is an implementation of the Index interface using google/btree.
|
||||
|
type BTreeIndex struct { |
||||
|
sync.RWMutex |
||||
|
LessFunction |
||||
|
*btree.BTree |
||||
|
} |
||||
|
|
||||
|
// Initialize populates the BTree tree with data from the keys channel,
|
||||
|
// according to the passed less function. It's destructive to the BTreeIndex.
|
||||
|
func (i *BTreeIndex) Initialize(less LessFunction, keys <-chan string) { |
||||
|
i.Lock() |
||||
|
defer i.Unlock() |
||||
|
i.LessFunction = less |
||||
|
i.BTree = rebuild(less, keys) |
||||
|
} |
||||
|
|
||||
|
// Insert inserts the given key (only) into the BTree tree.
|
||||
|
func (i *BTreeIndex) Insert(key string) { |
||||
|
i.Lock() |
||||
|
defer i.Unlock() |
||||
|
if i.BTree == nil || i.LessFunction == nil { |
||||
|
panic("uninitialized index") |
||||
|
} |
||||
|
i.BTree.ReplaceOrInsert(btreeString{s: key, l: i.LessFunction}) |
||||
|
} |
||||
|
|
||||
|
// Delete removes the given key (only) from the BTree tree.
|
||||
|
func (i *BTreeIndex) Delete(key string) { |
||||
|
i.Lock() |
||||
|
defer i.Unlock() |
||||
|
if i.BTree == nil || i.LessFunction == nil { |
||||
|
panic("uninitialized index") |
||||
|
} |
||||
|
i.BTree.Delete(btreeString{s: key, l: i.LessFunction}) |
||||
|
} |
||||
|
|
||||
|
// Keys yields a maximum of n keys in order. If the passed 'from' key is empty,
|
||||
|
// Keys will return the first n keys. If the passed 'from' key is non-empty, the
|
||||
|
// first key in the returned slice will be the key that immediately follows the
|
||||
|
// passed key, in key order.
|
||||
|
func (i *BTreeIndex) Keys(from string, n int) []string { |
||||
|
i.RLock() |
||||
|
defer i.RUnlock() |
||||
|
|
||||
|
if i.BTree == nil || i.LessFunction == nil { |
||||
|
panic("uninitialized index") |
||||
|
} |
||||
|
|
||||
|
if i.BTree.Len() <= 0 { |
||||
|
return []string{} |
||||
|
} |
||||
|
|
||||
|
btreeFrom := btreeString{s: from, l: i.LessFunction} |
||||
|
skipFirst := true |
||||
|
if len(from) <= 0 || !i.BTree.Has(btreeFrom) { |
||||
|
// no such key, so fabricate an always-smallest item
|
||||
|
btreeFrom = btreeString{s: "", l: func(string, string) bool { return true }} |
||||
|
skipFirst = false |
||||
|
} |
||||
|
|
||||
|
keys := []string{} |
||||
|
iterator := func(i btree.Item) bool { |
||||
|
keys = append(keys, i.(btreeString).s) |
||||
|
return len(keys) < n |
||||
|
} |
||||
|
i.BTree.AscendGreaterOrEqual(btreeFrom, iterator) |
||||
|
|
||||
|
if skipFirst && len(keys) > 0 { |
||||
|
keys = keys[1:] |
||||
|
} |
||||
|
|
||||
|
return keys |
||||
|
} |
||||
|
|
||||
|
// rebuildIndex does the work of regenerating the index
|
||||
|
// with the given keys.
|
||||
|
func rebuild(less LessFunction, keys <-chan string) *btree.BTree { |
||||
|
tree := btree.New(2) |
||||
|
for key := range keys { |
||||
|
tree.ReplaceOrInsert(btreeString{s: key, l: less}) |
||||
|
} |
||||
|
return tree |
||||
|
} |
@ -0,0 +1,148 @@ |
|||||
|
package diskv |
||||
|
|
||||
|
import ( |
||||
|
"bytes" |
||||
|
"reflect" |
||||
|
"testing" |
||||
|
"time" |
||||
|
) |
||||
|
|
||||
|
// strLess orders strings lexicographically.
func strLess(a, b string) bool { return a < b }

// cmpStrings reports whether a and b hold identical elements in order.
func cmpStrings(a, b []string) bool {
	if len(a) != len(b) {
		return false
	}
	for i, s := range a {
		if s != b[i] {
			return false
		}
	}
	return true
}
||||
|
|
||||
|
func (d *Diskv) isIndexed(key string) bool { |
||||
|
if d.Index == nil { |
||||
|
return false |
||||
|
} |
||||
|
|
||||
|
for _, got := range d.Index.Keys("", 1000) { |
||||
|
if got == key { |
||||
|
return true |
||||
|
} |
||||
|
} |
||||
|
return false |
||||
|
} |
||||
|
|
||||
|
func TestIndexOrder(t *testing.T) { |
||||
|
d := New(Options{ |
||||
|
BasePath: "index-test", |
||||
|
Transform: func(string) []string { return []string{} }, |
||||
|
CacheSizeMax: 1024, |
||||
|
Index: &BTreeIndex{}, |
||||
|
IndexLess: strLess, |
||||
|
}) |
||||
|
defer d.EraseAll() |
||||
|
|
||||
|
v := []byte{'1', '2', '3'} |
||||
|
d.Write("a", v) |
||||
|
if !d.isIndexed("a") { |
||||
|
t.Fatalf("'a' not indexed after write") |
||||
|
} |
||||
|
d.Write("1", v) |
||||
|
d.Write("m", v) |
||||
|
d.Write("-", v) |
||||
|
d.Write("A", v) |
||||
|
|
||||
|
expectedKeys := []string{"-", "1", "A", "a", "m"} |
||||
|
keys := []string{} |
||||
|
for _, key := range d.Index.Keys("", 100) { |
||||
|
keys = append(keys, key) |
||||
|
} |
||||
|
|
||||
|
if !cmpStrings(keys, expectedKeys) { |
||||
|
t.Fatalf("got %s, expected %s", keys, expectedKeys) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func TestIndexLoad(t *testing.T) { |
||||
|
d1 := New(Options{ |
||||
|
BasePath: "index-test", |
||||
|
Transform: func(string) []string { return []string{} }, |
||||
|
CacheSizeMax: 1024, |
||||
|
}) |
||||
|
defer d1.EraseAll() |
||||
|
|
||||
|
val := []byte{'1', '2', '3'} |
||||
|
keys := []string{"a", "b", "c", "d", "e", "f", "g"} |
||||
|
for _, key := range keys { |
||||
|
d1.Write(key, val) |
||||
|
} |
||||
|
|
||||
|
d2 := New(Options{ |
||||
|
BasePath: "index-test", |
||||
|
Transform: func(string) []string { return []string{} }, |
||||
|
CacheSizeMax: 1024, |
||||
|
Index: &BTreeIndex{}, |
||||
|
IndexLess: strLess, |
||||
|
}) |
||||
|
defer d2.EraseAll() |
||||
|
|
||||
|
// check d2 has properly loaded existing d1 data
|
||||
|
for _, key := range keys { |
||||
|
if !d2.isIndexed(key) { |
||||
|
t.Fatalf("key '%s' not indexed on secondary", key) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
// cache one
|
||||
|
if readValue, err := d2.Read(keys[0]); err != nil { |
||||
|
t.Fatalf("%s", err) |
||||
|
} else if bytes.Compare(val, readValue) != 0 { |
||||
|
t.Fatalf("%s: got %s, expected %s", keys[0], readValue, val) |
||||
|
} |
||||
|
|
||||
|
// make sure it got cached
|
||||
|
for i := 0; i < 10 && !d2.isCached(keys[0]); i++ { |
||||
|
time.Sleep(10 * time.Millisecond) |
||||
|
} |
||||
|
if !d2.isCached(keys[0]) { |
||||
|
t.Fatalf("key '%s' not cached", keys[0]) |
||||
|
} |
||||
|
|
||||
|
// kill the disk
|
||||
|
d1.EraseAll() |
||||
|
|
||||
|
// cached value should still be there in the second
|
||||
|
if readValue, err := d2.Read(keys[0]); err != nil { |
||||
|
t.Fatalf("%s", err) |
||||
|
} else if bytes.Compare(val, readValue) != 0 { |
||||
|
t.Fatalf("%s: got %s, expected %s", keys[0], readValue, val) |
||||
|
} |
||||
|
|
||||
|
// but not in the original
|
||||
|
if _, err := d1.Read(keys[0]); err == nil { |
||||
|
t.Fatalf("expected error reading from flushed store") |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func TestIndexKeysEmptyFrom(t *testing.T) { |
||||
|
d := New(Options{ |
||||
|
BasePath: "index-test", |
||||
|
Transform: func(string) []string { return []string{} }, |
||||
|
CacheSizeMax: 1024, |
||||
|
Index: &BTreeIndex{}, |
||||
|
IndexLess: strLess, |
||||
|
}) |
||||
|
defer d.EraseAll() |
||||
|
|
||||
|
for _, k := range []string{"a", "c", "z", "b", "x", "b", "y"} { |
||||
|
d.Write(k, []byte("1")) |
||||
|
} |
||||
|
|
||||
|
want := []string{"a", "b", "c", "x", "y", "z"} |
||||
|
have := d.Index.Keys("", 99) |
||||
|
if !reflect.DeepEqual(want, have) { |
||||
|
t.Errorf("want %v, have %v", want, have) |
||||
|
} |
||||
|
} |
@ -0,0 +1,121 @@ |
|||||
|
package diskv |
||||
|
|
||||
|
import ( |
||||
|
"bytes" |
||||
|
"io/ioutil" |
||||
|
"sync" |
||||
|
"testing" |
||||
|
"time" |
||||
|
) |
||||
|
|
||||
|
// ReadStream from cache shouldn't panic on a nil dereference from a nonexistent
|
||||
|
// Compression :)
|
||||
|
func TestIssue2A(t *testing.T) { |
||||
|
d := New(Options{ |
||||
|
BasePath: "test-issue-2a", |
||||
|
Transform: func(string) []string { return []string{} }, |
||||
|
CacheSizeMax: 1024, |
||||
|
}) |
||||
|
defer d.EraseAll() |
||||
|
|
||||
|
input := "abcdefghijklmnopqrstuvwxy" |
||||
|
key, writeBuf, sync := "a", bytes.NewBufferString(input), false |
||||
|
if err := d.WriteStream(key, writeBuf, sync); err != nil { |
||||
|
t.Fatal(err) |
||||
|
} |
||||
|
|
||||
|
for i := 0; i < 2; i++ { |
||||
|
began := time.Now() |
||||
|
rc, err := d.ReadStream(key, false) |
||||
|
if err != nil { |
||||
|
t.Fatal(err) |
||||
|
} |
||||
|
buf, err := ioutil.ReadAll(rc) |
||||
|
if err != nil { |
||||
|
t.Fatal(err) |
||||
|
} |
||||
|
if !cmpBytes(buf, []byte(input)) { |
||||
|
t.Fatalf("read #%d: '%s' != '%s'", i+1, string(buf), input) |
||||
|
} |
||||
|
rc.Close() |
||||
|
t.Logf("read #%d in %s", i+1, time.Since(began)) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
// ReadStream on a key that resolves to a directory should return an error.
|
||||
|
func TestIssue2B(t *testing.T) { |
||||
|
blockTransform := func(s string) []string { |
||||
|
transformBlockSize := 3 |
||||
|
sliceSize := len(s) / transformBlockSize |
||||
|
pathSlice := make([]string, sliceSize) |
||||
|
for i := 0; i < sliceSize; i++ { |
||||
|
from, to := i*transformBlockSize, (i*transformBlockSize)+transformBlockSize |
||||
|
pathSlice[i] = s[from:to] |
||||
|
} |
||||
|
return pathSlice |
||||
|
} |
||||
|
|
||||
|
d := New(Options{ |
||||
|
BasePath: "test-issue-2b", |
||||
|
Transform: blockTransform, |
||||
|
CacheSizeMax: 0, |
||||
|
}) |
||||
|
defer d.EraseAll() |
||||
|
|
||||
|
v := []byte{'1', '2', '3'} |
||||
|
if err := d.Write("abcabc", v); err != nil { |
||||
|
t.Fatal(err) |
||||
|
} |
||||
|
|
||||
|
_, err := d.ReadStream("abc", false) |
||||
|
if err == nil { |
||||
|
t.Fatal("ReadStream('abc') should return error") |
||||
|
} |
||||
|
t.Logf("ReadStream('abc') returned error: %v", err) |
||||
|
} |
||||
|
|
||||
|
// Ensure ReadStream with direct=true isn't racy.
|
||||
|
func TestIssue17(t *testing.T) { |
||||
|
var ( |
||||
|
basePath = "test-data" |
||||
|
) |
||||
|
|
||||
|
dWrite := New(Options{ |
||||
|
BasePath: basePath, |
||||
|
CacheSizeMax: 0, |
||||
|
}) |
||||
|
defer dWrite.EraseAll() |
||||
|
|
||||
|
dRead := New(Options{ |
||||
|
BasePath: basePath, |
||||
|
CacheSizeMax: 50, |
||||
|
}) |
||||
|
|
||||
|
cases := map[string]string{ |
||||
|
"a": `1234567890`, |
||||
|
"b": `2345678901`, |
||||
|
"c": `3456789012`, |
||||
|
"d": `4567890123`, |
||||
|
"e": `5678901234`, |
||||
|
} |
||||
|
|
||||
|
for k, v := range cases { |
||||
|
if err := dWrite.Write(k, []byte(v)); err != nil { |
||||
|
t.Fatalf("during write: %s", err) |
||||
|
} |
||||
|
dRead.Read(k) // ensure it's added to cache
|
||||
|
} |
||||
|
|
||||
|
var wg sync.WaitGroup |
||||
|
start := make(chan struct{}) |
||||
|
for k, v := range cases { |
||||
|
wg.Add(1) |
||||
|
go func(k, v string) { |
||||
|
<-start |
||||
|
dRead.ReadStream(k, true) |
||||
|
wg.Done() |
||||
|
}(k, v) |
||||
|
} |
||||
|
close(start) |
||||
|
wg.Wait() |
||||
|
} |
@ -0,0 +1,231 @@ |
|||||
|
package diskv_test |
||||
|
|
||||
|
import ( |
||||
|
"reflect" |
||||
|
"runtime" |
||||
|
"strings" |
||||
|
"testing" |
||||
|
|
||||
|
"github.com/peterbourgon/diskv" |
||||
|
) |
||||
|
|
||||
|
var (
	// keysTestData maps keys (whose prefixes are exercised below) to values.
	keysTestData = map[string]string{
		"ab01cd01": "When we started building CoreOS",
		"ab01cd02": "we looked at all the various components available to us",
		"ab01cd03": "re-using the best tools",
		"ef01gh04": "and building the ones that did not exist",
		"ef02gh05": "We believe strongly in the Unix philosophy",
		"xxxxxxxx": "tools should be independently useful",
	}

	// prefixes covers matching, partially-matching, and non-matching cases.
	prefixes = []string{
		"", // all
		"a",
		"ab",
		"ab0",
		"ab01",
		"ab01cd0",
		"ab01cd01",
		"ab01cd01x", // none
		"b",         // none
		"b0",        // none
		"0",         // none
		"01",        // none
		"e",
		"ef",
		"efx", // none
		"ef01gh0",
		"ef01gh04",
		"ef01gh05",
		"ef01gh06", // none
	}
)
||||
|
|
||||
|
func TestKeysFlat(t *testing.T) { |
||||
|
transform := func(s string) []string { |
||||
|
if s == "" { |
||||
|
t.Fatalf(`transform should not be called with ""`) |
||||
|
} |
||||
|
return []string{} |
||||
|
} |
||||
|
d := diskv.New(diskv.Options{ |
||||
|
BasePath: "test-data", |
||||
|
Transform: transform, |
||||
|
}) |
||||
|
defer d.EraseAll() |
||||
|
|
||||
|
for k, v := range keysTestData { |
||||
|
d.Write(k, []byte(v)) |
||||
|
} |
||||
|
|
||||
|
checkKeys(t, d.Keys(nil), keysTestData) |
||||
|
} |
||||
|
|
||||
|
func TestKeysNested(t *testing.T) { |
||||
|
d := diskv.New(diskv.Options{ |
||||
|
BasePath: "test-data", |
||||
|
Transform: blockTransform(2), |
||||
|
}) |
||||
|
defer d.EraseAll() |
||||
|
|
||||
|
for k, v := range keysTestData { |
||||
|
d.Write(k, []byte(v)) |
||||
|
} |
||||
|
|
||||
|
checkKeys(t, d.Keys(nil), keysTestData) |
||||
|
} |
||||
|
|
||||
|
func TestKeysPrefixFlat(t *testing.T) { |
||||
|
d := diskv.New(diskv.Options{ |
||||
|
BasePath: "test-data", |
||||
|
}) |
||||
|
defer d.EraseAll() |
||||
|
|
||||
|
for k, v := range keysTestData { |
||||
|
d.Write(k, []byte(v)) |
||||
|
} |
||||
|
|
||||
|
for _, prefix := range prefixes { |
||||
|
checkKeys(t, d.KeysPrefix(prefix, nil), filterPrefix(keysTestData, prefix)) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func TestKeysPrefixNested(t *testing.T) { |
||||
|
d := diskv.New(diskv.Options{ |
||||
|
BasePath: "test-data", |
||||
|
Transform: blockTransform(2), |
||||
|
}) |
||||
|
defer d.EraseAll() |
||||
|
|
||||
|
for k, v := range keysTestData { |
||||
|
d.Write(k, []byte(v)) |
||||
|
} |
||||
|
|
||||
|
for _, prefix := range prefixes { |
||||
|
checkKeys(t, d.KeysPrefix(prefix, nil), filterPrefix(keysTestData, prefix)) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func TestKeysCancel(t *testing.T) { |
||||
|
d := diskv.New(diskv.Options{ |
||||
|
BasePath: "test-data", |
||||
|
}) |
||||
|
defer d.EraseAll() |
||||
|
|
||||
|
for k, v := range keysTestData { |
||||
|
d.Write(k, []byte(v)) |
||||
|
} |
||||
|
|
||||
|
var ( |
||||
|
cancel = make(chan struct{}) |
||||
|
received = 0 |
||||
|
cancelAfter = len(keysTestData) / 2 |
||||
|
) |
||||
|
|
||||
|
for key := range d.Keys(cancel) { |
||||
|
received++ |
||||
|
|
||||
|
if received >= cancelAfter { |
||||
|
close(cancel) |
||||
|
runtime.Gosched() // allow walker to detect cancel
|
||||
|
} |
||||
|
|
||||
|
t.Logf("received %d: %q", received, key) |
||||
|
} |
||||
|
|
||||
|
if want, have := cancelAfter, received; want != have { |
||||
|
t.Errorf("want %d, have %d") |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func checkKeys(t *testing.T, c <-chan string, want map[string]string) { |
||||
|
for k := range c { |
||||
|
if _, ok := want[k]; !ok { |
||||
|
t.Errorf("%q yielded but not expected", k) |
||||
|
continue |
||||
|
} |
||||
|
|
||||
|
delete(want, k) |
||||
|
t.Logf("%q yielded OK", k) |
||||
|
} |
||||
|
|
||||
|
if len(want) != 0 { |
||||
|
t.Errorf("%d expected key(s) not yielded: %s", len(want), strings.Join(flattenKeys(want), ", ")) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
// blockTransform returns a diskv transform that splits a key into
// blockSize-character chunks, one per directory level; trailing characters
// that do not fill a whole block are dropped.
func blockTransform(blockSize int) func(string) []string {
	return func(s string) []string {
		blocks := len(s) / blockSize
		path := make([]string, 0, blocks)
		for i := 0; i < blocks; i++ {
			path = append(path, s[i*blockSize:(i+1)*blockSize])
		}
		return path
	}
}
||||
|
|
||||
|
// filterPrefix returns the subset of in whose keys start with prefix.
func filterPrefix(in map[string]string, prefix string) map[string]string {
	out := map[string]string{}
	for key, value := range in {
		if strings.HasPrefix(key, prefix) {
			out[key] = value
		}
	}
	return out
}
||||
|
|
||||
|
func TestFilterPrefix(t *testing.T) { |
||||
|
input := map[string]string{ |
||||
|
"all": "", |
||||
|
"and": "", |
||||
|
"at": "", |
||||
|
"available": "", |
||||
|
"best": "", |
||||
|
"building": "", |
||||
|
"components": "", |
||||
|
"coreos": "", |
||||
|
"did": "", |
||||
|
"exist": "", |
||||
|
"looked": "", |
||||
|
"not": "", |
||||
|
"ones": "", |
||||
|
"re-using": "", |
||||
|
"started": "", |
||||
|
"that": "", |
||||
|
"the": "", |
||||
|
"to": "", |
||||
|
"tools": "", |
||||
|
"us": "", |
||||
|
"various": "", |
||||
|
"we": "", |
||||
|
"when": "", |
||||
|
} |
||||
|
|
||||
|
for prefix, want := range map[string]map[string]string{ |
||||
|
"a": map[string]string{"all": "", "and": "", "at": "", "available": ""}, |
||||
|
"al": map[string]string{"all": ""}, |
||||
|
"all": map[string]string{"all": ""}, |
||||
|
"alll": map[string]string{}, |
||||
|
"c": map[string]string{"components": "", "coreos": ""}, |
||||
|
"co": map[string]string{"components": "", "coreos": ""}, |
||||
|
"com": map[string]string{"components": ""}, |
||||
|
} { |
||||
|
have := filterPrefix(input, prefix) |
||||
|
if !reflect.DeepEqual(want, have) { |
||||
|
t.Errorf("%q: want %v, have %v", prefix, flattenKeys(want), flattenKeys(have)) |
||||
|
} |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
// flattenKeys returns m's keys as a slice, in arbitrary order.
func flattenKeys(m map[string]string) []string {
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	return keys
}
@ -0,0 +1,153 @@ |
|||||
|
package diskv |
||||
|
|
||||
|
import ( |
||||
|
"fmt" |
||||
|
"math/rand" |
||||
|
"testing" |
||||
|
) |
||||
|
|
||||
|
// shuffle permutes keys in place via random pairwise swaps driven by a
// random permutation of the indices.
func shuffle(keys []string) {
	perm := rand.Perm(len(keys))
	for i := range keys {
		j := perm[i]
		keys[i], keys[j] = keys[j], keys[i]
	}
}
||||
|
|
||||
|
// genValue returns a size-byte value of random lowercase ASCII letters.
func genValue(size int) []byte {
	v := make([]byte, size)
	for i := range v {
		v[i] = uint8((rand.Int() % 26) + 97) // a-z
	}
	return v
}
||||
|
|
||||
|
const (
	// keyCount is the number of keys populated by the benchmarks.
	keyCount = 1000
)

// genKeys returns the decimal key strings "0" through "999".
func genKeys() []string {
	keys := make([]string, keyCount)
	for i := range keys {
		keys[i] = fmt.Sprintf("%d", i)
	}
	return keys
}
||||
|
|
||||
|
func (d *Diskv) load(keys []string, val []byte) { |
||||
|
for _, key := range keys { |
||||
|
d.Write(key, val) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func benchRead(b *testing.B, size, cachesz int) { |
||||
|
b.StopTimer() |
||||
|
d := New(Options{ |
||||
|
BasePath: "speed-test", |
||||
|
Transform: func(string) []string { return []string{} }, |
||||
|
CacheSizeMax: uint64(cachesz), |
||||
|
}) |
||||
|
defer d.EraseAll() |
||||
|
|
||||
|
keys := genKeys() |
||||
|
value := genValue(size) |
||||
|
d.load(keys, value) |
||||
|
shuffle(keys) |
||||
|
b.SetBytes(int64(size)) |
||||
|
|
||||
|
b.StartTimer() |
||||
|
for i := 0; i < b.N; i++ { |
||||
|
_, _ = d.Read(keys[i%len(keys)]) |
||||
|
} |
||||
|
b.StopTimer() |
||||
|
} |
||||
|
|
||||
|
func benchWrite(b *testing.B, size int, withIndex bool) { |
||||
|
b.StopTimer() |
||||
|
|
||||
|
options := Options{ |
||||
|
BasePath: "speed-test", |
||||
|
Transform: func(string) []string { return []string{} }, |
||||
|
CacheSizeMax: 0, |
||||
|
} |
||||
|
if withIndex { |
||||
|
options.Index = &BTreeIndex{} |
||||
|
options.IndexLess = strLess |
||||
|
} |
||||
|
|
||||
|
d := New(options) |
||||
|
defer d.EraseAll() |
||||
|
keys := genKeys() |
||||
|
value := genValue(size) |
||||
|
shuffle(keys) |
||||
|
b.SetBytes(int64(size)) |
||||
|
|
||||
|
b.StartTimer() |
||||
|
for i := 0; i < b.N; i++ { |
||||
|
d.Write(keys[i%len(keys)], value) |
||||
|
} |
||||
|
b.StopTimer() |
||||
|
} |
||||
|
|
||||
|
func BenchmarkWrite__32B_NoIndex(b *testing.B) { |
||||
|
benchWrite(b, 32, false) |
||||
|
} |
||||
|
|
||||
|
func BenchmarkWrite__1KB_NoIndex(b *testing.B) { |
||||
|
benchWrite(b, 1024, false) |
||||
|
} |
||||
|
|
||||
|
func BenchmarkWrite__4KB_NoIndex(b *testing.B) { |
||||
|
benchWrite(b, 4096, false) |
||||
|
} |
||||
|
|
||||
|
func BenchmarkWrite_10KB_NoIndex(b *testing.B) { |
||||
|
benchWrite(b, 10240, false) |
||||
|
} |
||||
|
|
||||
|
func BenchmarkWrite__32B_WithIndex(b *testing.B) { |
||||
|
benchWrite(b, 32, true) |
||||
|
} |
||||
|
|
||||
|
func BenchmarkWrite__1KB_WithIndex(b *testing.B) { |
||||
|
benchWrite(b, 1024, true) |
||||
|
} |
||||
|
|
||||
|
func BenchmarkWrite__4KB_WithIndex(b *testing.B) { |
||||
|
benchWrite(b, 4096, true) |
||||
|
} |
||||
|
|
||||
|
func BenchmarkWrite_10KB_WithIndex(b *testing.B) { |
||||
|
benchWrite(b, 10240, true) |
||||
|
} |
||||
|
|
||||
|
func BenchmarkRead__32B_NoCache(b *testing.B) { |
||||
|
benchRead(b, 32, 0) |
||||
|
} |
||||
|
|
||||
|
func BenchmarkRead__1KB_NoCache(b *testing.B) { |
||||
|
benchRead(b, 1024, 0) |
||||
|
} |
||||
|
|
||||
|
func BenchmarkRead__4KB_NoCache(b *testing.B) { |
||||
|
benchRead(b, 4096, 0) |
||||
|
} |
||||
|
|
||||
|
func BenchmarkRead_10KB_NoCache(b *testing.B) { |
||||
|
benchRead(b, 10240, 0) |
||||
|
} |
||||
|
|
||||
|
func BenchmarkRead__32B_WithCache(b *testing.B) { |
||||
|
benchRead(b, 32, keyCount*32*2) |
||||
|
} |
||||
|
|
||||
|
func BenchmarkRead__1KB_WithCache(b *testing.B) { |
||||
|
benchRead(b, 1024, keyCount*1024*2) |
||||
|
} |
||||
|
|
||||
|
func BenchmarkRead__4KB_WithCache(b *testing.B) { |
||||
|
benchRead(b, 4096, keyCount*4096*2) |
||||
|
} |
||||
|
|
||||
|
func BenchmarkRead_10KB_WithCache(b *testing.B) { |
||||
|
benchRead(b, 10240, keyCount*4096*2) |
||||
|
} |
@ -0,0 +1,117 @@ |
|||||
|
package diskv |
||||
|
|
||||
|
import ( |
||||
|
"bytes" |
||||
|
"io/ioutil" |
||||
|
"testing" |
||||
|
) |
||||
|
|
||||
|
func TestBasicStreamCaching(t *testing.T) { |
||||
|
d := New(Options{ |
||||
|
BasePath: "test-data", |
||||
|
CacheSizeMax: 1024, |
||||
|
}) |
||||
|
defer d.EraseAll() |
||||
|
|
||||
|
input := "a1b2c3" |
||||
|
key, writeBuf, sync := "a", bytes.NewBufferString(input), true |
||||
|
if err := d.WriteStream(key, writeBuf, sync); err != nil { |
||||
|
t.Fatal(err) |
||||
|
} |
||||
|
|
||||
|
if d.isCached(key) { |
||||
|
t.Fatalf("'%s' cached, but shouldn't be (yet)", key) |
||||
|
} |
||||
|
|
||||
|
rc, err := d.ReadStream(key, false) |
||||
|
if err != nil { |
||||
|
t.Fatal(err) |
||||
|
} |
||||
|
|
||||
|
readBuf, err := ioutil.ReadAll(rc) |
||||
|
if err != nil { |
||||
|
t.Fatal(err) |
||||
|
} |
||||
|
|
||||
|
if !cmpBytes(readBuf, []byte(input)) { |
||||
|
t.Fatalf("'%s' != '%s'", string(readBuf), input) |
||||
|
} |
||||
|
|
||||
|
if !d.isCached(key) { |
||||
|
t.Fatalf("'%s' isn't cached, but should be", key) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func TestReadStreamDirect(t *testing.T) { |
||||
|
var ( |
||||
|
basePath = "test-data" |
||||
|
) |
||||
|
dWrite := New(Options{ |
||||
|
BasePath: basePath, |
||||
|
CacheSizeMax: 0, |
||||
|
}) |
||||
|
defer dWrite.EraseAll() |
||||
|
dRead := New(Options{ |
||||
|
BasePath: basePath, |
||||
|
CacheSizeMax: 1024, |
||||
|
}) |
||||
|
|
||||
|
// Write
|
||||
|
key, val1, val2 := "a", []byte(`1234567890`), []byte(`aaaaaaaaaa`) |
||||
|
if err := dWrite.Write(key, val1); err != nil { |
||||
|
t.Fatalf("during first write: %s", err) |
||||
|
} |
||||
|
|
||||
|
// First, caching read.
|
||||
|
val, err := dRead.Read(key) |
||||
|
if err != nil { |
||||
|
t.Fatalf("during initial read: %s", err) |
||||
|
} |
||||
|
t.Logf("read 1: %s => %s", key, string(val)) |
||||
|
if !cmpBytes(val1, val) { |
||||
|
t.Errorf("expected %q, got %q", string(val1), string(val)) |
||||
|
} |
||||
|
if !dRead.isCached(key) { |
||||
|
t.Errorf("%q should be cached, but isn't", key) |
||||
|
} |
||||
|
|
||||
|
// Write a different value.
|
||||
|
if err := dWrite.Write(key, val2); err != nil { |
||||
|
t.Fatalf("during second write: %s", err) |
||||
|
} |
||||
|
|
||||
|
// Second read, should hit cache and get the old value.
|
||||
|
val, err = dRead.Read(key) |
||||
|
if err != nil { |
||||
|
t.Fatalf("during second (cache-hit) read: %s", err) |
||||
|
} |
||||
|
t.Logf("read 2: %s => %s", key, string(val)) |
||||
|
if !cmpBytes(val1, val) { |
||||
|
t.Errorf("expected %q, got %q", string(val1), string(val)) |
||||
|
} |
||||
|
|
||||
|
// Third, direct read, should get the updated value.
|
||||
|
rc, err := dRead.ReadStream(key, true) |
||||
|
if err != nil { |
||||
|
t.Fatalf("during third (direct) read, ReadStream: %s", err) |
||||
|
} |
||||
|
defer rc.Close() |
||||
|
val, err = ioutil.ReadAll(rc) |
||||
|
if err != nil { |
||||
|
t.Fatalf("during third (direct) read, ReadAll: %s", err) |
||||
|
} |
||||
|
t.Logf("read 3: %s => %s", key, string(val)) |
||||
|
if !cmpBytes(val2, val) { |
||||
|
t.Errorf("expected %q, got %q", string(val1), string(val)) |
||||
|
} |
||||
|
|
||||
|
// Fourth read, should hit cache and get the new value.
|
||||
|
val, err = dRead.Read(key) |
||||
|
if err != nil { |
||||
|
t.Fatalf("during fourth (cache-hit) read: %s", err) |
||||
|
} |
||||
|
t.Logf("read 4: %s => %s", key, string(val)) |
||||
|
if !cmpBytes(val2, val) { |
||||
|
t.Errorf("expected %q, got %q", string(val1), string(val)) |
||||
|
} |
||||
|
} |
@ -0,0 +1,349 @@ |
|||||
|
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
|
||||
|
// All rights reserved.
|
||||
|
//
|
||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||
|
// found in the LICENSE file.
|
||||
|
|
||||
|
package leveldb |
||||
|
|
||||
|
import ( |
||||
|
"encoding/binary" |
||||
|
"fmt" |
||||
|
"io" |
||||
|
|
||||
|
"github.com/syndtr/goleveldb/leveldb/errors" |
||||
|
"github.com/syndtr/goleveldb/leveldb/memdb" |
||||
|
"github.com/syndtr/goleveldb/leveldb/storage" |
||||
|
) |
||||
|
|
||||
|
// ErrBatchCorrupted records reason of batch corruption. This error will be
|
||||
|
// wrapped with errors.ErrCorrupted.
|
||||
|
type ErrBatchCorrupted struct { |
||||
|
Reason string |
||||
|
} |
||||
|
|
||||
|
func (e *ErrBatchCorrupted) Error() string { |
||||
|
return fmt.Sprintf("leveldb: batch corrupted: %s", e.Reason) |
||||
|
} |
||||
|
|
||||
|
func newErrBatchCorrupted(reason string) error { |
||||
|
return errors.NewErrCorrupted(storage.FileDesc{}, &ErrBatchCorrupted{reason}) |
||||
|
} |
||||
|
|
||||
|
const (
	// batchHeaderLen is the fixed per-batch header size (8 + 4 bytes;
	// presumably sequence number plus record count — not shown here).
	batchHeaderLen = 8 + 4
	// batchGrowRec tapers the growth factor of large batches (see grow).
	batchGrowRec = 3000
	batchBufioSize = 16
)
||||
|
|
||||
|
// BatchReplay wraps basic batch operations.
type BatchReplay interface {
	Put(key, value []byte)
	Delete(key []byte)
}
||||
|
|
||||
|
type batchIndex struct { |
||||
|
keyType keyType |
||||
|
keyPos, keyLen int |
||||
|
valuePos, valueLen int |
||||
|
} |
||||
|
|
||||
|
func (index batchIndex) k(data []byte) []byte { |
||||
|
return data[index.keyPos : index.keyPos+index.keyLen] |
||||
|
} |
||||
|
|
||||
|
func (index batchIndex) v(data []byte) []byte { |
||||
|
if index.valueLen != 0 { |
||||
|
return data[index.valuePos : index.valuePos+index.valueLen] |
||||
|
} |
||||
|
return nil |
||||
|
} |
||||
|
|
||||
|
func (index batchIndex) kv(data []byte) (key, value []byte) { |
||||
|
return index.k(data), index.v(data) |
||||
|
} |
||||
|
|
||||
|
// Batch is a write batch.
type Batch struct {
	// data holds the serialized records back to back.
	data []byte
	// index locates each record inside data, in insertion order.
	index []batchIndex

	// internalLen is sums of key/value pair length plus 8-bytes internal key.
	internalLen int
}
||||
|
|
||||
|
func (b *Batch) grow(n int) { |
||||
|
o := len(b.data) |
||||
|
if cap(b.data)-o < n { |
||||
|
div := 1 |
||||
|
if len(b.index) > batchGrowRec { |
||||
|
div = len(b.index) / batchGrowRec |
||||
|
} |
||||
|
ndata := make([]byte, o, o+n+o/div) |
||||
|
copy(ndata, b.data) |
||||
|
b.data = ndata |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
// appendRec encodes one record at the end of b.data — a type byte, a
// uvarint key length, the key, and (for 'put' records only) a uvarint
// value length plus the value — and registers it in b.index.
func (b *Batch) appendRec(kt keyType, key, value []byte) {
	// Worst-case encoded size; varints may use fewer bytes than reserved.
	n := 1 + binary.MaxVarintLen32 + len(key)
	if kt == keyTypeVal {
		n += binary.MaxVarintLen32 + len(value)
	}
	b.grow(n)
	index := batchIndex{keyType: kt}
	o := len(b.data)
	data := b.data[:o+n]
	data[o] = byte(kt)
	o++
	o += binary.PutUvarint(data[o:], uint64(len(key)))
	index.keyPos = o
	index.keyLen = len(key)
	o += copy(data[o:], key)
	if kt == keyTypeVal {
		o += binary.PutUvarint(data[o:], uint64(len(value)))
		index.valuePos = o
		index.valueLen = len(value)
		o += copy(data[o:], value)
	}
	// Trim back to the bytes actually written.
	b.data = data[:o]
	b.index = append(b.index, index)
	// 8 accounts for the internal-key overhead per record.
	b.internalLen += index.keyLen + index.valueLen + 8
}
||||
|
|
||||
|
// Put appends 'put operation' of the given key/value pair to the batch.
// It is safe to modify the contents of the argument after Put returns but not
// before.
// The key and value bytes are copied into the batch's internal buffer.
func (b *Batch) Put(key, value []byte) {
	b.appendRec(keyTypeVal, key, value)
}
||||
|
|
||||
|
// Delete appends 'delete operation' of the given key to the batch.
// It is safe to modify the contents of the argument after Delete returns but
// not before.
// The key bytes are copied into the batch's internal buffer.
func (b *Batch) Delete(key []byte) {
	b.appendRec(keyTypeDel, key, nil)
}
||||
|
|
||||
|
// Dump dumps batch contents. The returned slice can be loaded into the
// batch using Load method.
// The returned slice is not its own copy, so the contents should not be
// modified.
// Note: the returned bytes do not include the 12-byte batch header.
func (b *Batch) Dump() []byte {
	return b.data
}
||||
|
|
||||
|
// Load loads given slice into the batch. Previous contents of the batch
// will be discarded.
// The given slice will not be copied and will be used as batch buffer, so
// it is not safe to modify the contents of the slice.
// The -1 disables the record-count check performed by decode.
func (b *Batch) Load(data []byte) error {
	return b.decode(data, -1)
}
||||
|
|
||||
|
// Replay replays batch contents.
|
||||
|
func (b *Batch) Replay(r BatchReplay) error { |
||||
|
for _, index := range b.index { |
||||
|
switch index.keyType { |
||||
|
case keyTypeVal: |
||||
|
r.Put(index.k(b.data), index.v(b.data)) |
||||
|
case keyTypeDel: |
||||
|
r.Delete(index.k(b.data)) |
||||
|
} |
||||
|
} |
||||
|
return nil |
||||
|
} |
||||
|
|
||||
|
// Len returns number of records in the batch.
func (b *Batch) Len() int {
	return len(b.index)
}
||||
|
|
||||
|
// Reset resets the batch.
|
||||
|
func (b *Batch) Reset() { |
||||
|
b.data = b.data[:0] |
||||
|
b.index = b.index[:0] |
||||
|
b.internalLen = 0 |
||||
|
} |
||||
|
|
||||
|
func (b *Batch) replayInternal(fn func(i int, kt keyType, k, v []byte) error) error { |
||||
|
for i, index := range b.index { |
||||
|
if err := fn(i, index.keyType, index.k(b.data), index.v(b.data)); err != nil { |
||||
|
return err |
||||
|
} |
||||
|
} |
||||
|
return nil |
||||
|
} |
||||
|
|
||||
|
// append concatenates batch p onto b, then rebases the copied index
// entries so their offsets point into b's (now longer) data buffer.
func (b *Batch) append(p *Batch) {
	ob := len(b.data)
	oi := len(b.index)
	b.data = append(b.data, p.data...)
	b.index = append(b.index, p.index...)
	b.internalLen += p.internalLen

	// Updating index offset.
	// When ob == 0 the copied offsets are already correct.
	if ob != 0 {
		for ; oi < len(b.index); oi++ {
			index := &b.index[oi]
			index.keyPos += ob
			// valuePos is only meaningful for records with a value.
			if index.valueLen != 0 {
				index.valuePos += ob
			}
		}
	}
}
||||
|
|
||||
|
// decode replaces the batch contents with the records parsed from data.
// data is adopted as the batch buffer without copying. When expectedLen
// is non-negative, the parsed record count must match it exactly.
func (b *Batch) decode(data []byte, expectedLen int) error {
	b.data = data
	b.index = b.index[:0]
	b.internalLen = 0
	err := decodeBatch(data, func(i int, index batchIndex) error {
		b.index = append(b.index, index)
		// 8 accounts for the internal-key overhead per record.
		b.internalLen += index.keyLen + index.valueLen + 8
		return nil
	})
	if err != nil {
		return err
	}
	if expectedLen >= 0 && len(b.index) != expectedLen {
		return newErrBatchCorrupted(fmt.Sprintf("invalid records length: %d vs %d", expectedLen, len(b.index)))
	}
	return nil
}
||||
|
|
||||
|
// putMem inserts every batch record into mdb, giving record i the
// sequence number seq+i. The internal-key buffer ik is reused across
// iterations to limit allocations.
func (b *Batch) putMem(seq uint64, mdb *memdb.DB) error {
	var ik []byte
	for i, index := range b.index {
		ik = makeInternalKey(ik, index.k(b.data), seq+uint64(i), index.keyType)
		if err := mdb.Put(ik, index.v(b.data)); err != nil {
			return err
		}
	}
	return nil
}
||||
|
|
||||
|
// revertMem deletes from mdb the same internal keys that putMem with the
// same seq would have inserted, undoing a previous putMem.
func (b *Batch) revertMem(seq uint64, mdb *memdb.DB) error {
	var ik []byte
	for i, index := range b.index {
		ik = makeInternalKey(ik, index.k(b.data), seq+uint64(i), index.keyType)
		if err := mdb.Delete(ik); err != nil {
			return err
		}
	}
	return nil
}
||||
|
|
||||
|
func newBatch() interface{} { |
||||
|
return &Batch{} |
||||
|
} |
||||
|
|
||||
|
// decodeBatch parses the records serialized in data (header excluded),
// calling fn with the record index and its location for each one. It
// stops at the first malformed record or the first error from fn.
// index is a single value reused across iterations; fn receives a copy.
func decodeBatch(data []byte, fn func(i int, index batchIndex) error) error {
	var index batchIndex
	for i, o := 0, 0; o < len(data); i++ {
		// Key type.
		index.keyType = keyType(data[o])
		if index.keyType > keyTypeVal {
			return newErrBatchCorrupted(fmt.Sprintf("bad record: invalid type %#x", uint(index.keyType)))
		}
		o++

		// Key.
		x, n := binary.Uvarint(data[o:])
		o += n
		// n <= 0 covers both an empty/truncated varint and overflow.
		if n <= 0 || o+int(x) > len(data) {
			return newErrBatchCorrupted("bad record: invalid key length")
		}
		index.keyPos = o
		index.keyLen = int(x)
		o += index.keyLen

		// Value.
		if index.keyType == keyTypeVal {
			x, n = binary.Uvarint(data[o:])
			o += n
			if n <= 0 || o+int(x) > len(data) {
				return newErrBatchCorrupted("bad record: invalid value length")
			}
			index.valuePos = o
			index.valueLen = int(x)
			o += index.valueLen
		} else {
			// Clear stale value fields left over from the previous record.
			index.valuePos = 0
			index.valueLen = 0
		}

		if err := fn(i, index); err != nil {
			return err
		}
	}
	return nil
}
||||
|
|
||||
|
// decodeBatchToMem decodes a headered batch from data and applies its
// records directly to mdb, giving record i sequence number seq+i. The
// decoded sequence number must be at least expectSeq, and the decoded
// record count must match the header's count exactly.
func decodeBatchToMem(data []byte, expectSeq uint64, mdb *memdb.DB) (seq uint64, batchLen int, err error) {
	seq, batchLen, err = decodeBatchHeader(data)
	if err != nil {
		return 0, 0, err
	}
	if seq < expectSeq {
		return 0, 0, newErrBatchCorrupted("invalid sequence number")
	}
	data = data[batchHeaderLen:]
	var ik []byte
	var decodedLen int
	err = decodeBatch(data, func(i int, index batchIndex) error {
		// More records than the header declared.
		if i >= batchLen {
			return newErrBatchCorrupted("invalid records length")
		}
		ik = makeInternalKey(ik, index.k(data), seq+uint64(i), index.keyType)
		if err := mdb.Put(ik, index.v(data)); err != nil {
			return err
		}
		decodedLen++
		return nil
	})
	// Fewer records than the header declared.
	if err == nil && decodedLen != batchLen {
		err = newErrBatchCorrupted(fmt.Sprintf("invalid records length: %d vs %d", batchLen, decodedLen))
	}
	return
}
||||
|
|
||||
|
// encodeBatchHeader writes the 12-byte batch header — little-endian
// 8-byte sequence number followed by a 4-byte record count — into dst,
// which is sized via ensureBuffer, and returns it.
func encodeBatchHeader(dst []byte, seq uint64, batchLen int) []byte {
	dst = ensureBuffer(dst, batchHeaderLen)
	binary.LittleEndian.PutUint64(dst, seq)
	binary.LittleEndian.PutUint32(dst[8:], uint32(batchLen))
	return dst
}
||||
|
|
||||
|
// decodeBatchHeader parses the 12-byte batch header (little-endian
// sequence number and record count) from the front of data.
func decodeBatchHeader(data []byte) (seq uint64, batchLen int, err error) {
	if len(data) < batchHeaderLen {
		return 0, 0, newErrBatchCorrupted("too short")
	}

	seq = binary.LittleEndian.Uint64(data)
	batchLen = int(binary.LittleEndian.Uint32(data[8:]))
	// int(uint32) can only be negative where int is 32 bits wide and the
	// stored count exceeds MaxInt32.
	if batchLen < 0 {
		return 0, 0, newErrBatchCorrupted("invalid records length")
	}
	return
}
||||
|
|
||||
|
func batchesLen(batches []*Batch) int { |
||||
|
batchLen := 0 |
||||
|
for _, batch := range batches { |
||||
|
batchLen += batch.Len() |
||||
|
} |
||||
|
return batchLen |
||||
|
} |
||||
|
|
||||
|
// writeBatchesWithHeader writes a single batch header (with seq and the
// combined record count) followed by the raw data of every batch, so the
// concatenation reads back as one batch.
func writeBatchesWithHeader(wr io.Writer, batches []*Batch, seq uint64) error {
	if _, err := wr.Write(encodeBatchHeader(nil, seq, batchesLen(batches))); err != nil {
		return err
	}
	for _, batch := range batches {
		if _, err := wr.Write(batch.data); err != nil {
			return err
		}
	}
	return nil
}
@ -0,0 +1,147 @@ |
|||||
|
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
|
||||
|
// All rights reserved.
|
||||
|
//
|
||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||
|
// found in the LICENSE file.
|
||||
|
|
||||
|
package leveldb |
||||
|
|
||||
|
import ( |
||||
|
"bytes" |
||||
|
"fmt" |
||||
|
"testing" |
||||
|
"testing/quick" |
||||
|
|
||||
|
"github.com/syndtr/goleveldb/leveldb/testutil" |
||||
|
) |
||||
|
|
||||
|
// TestBatchHeader quick-checks that encodeBatchHeader and
// decodeBatchHeader round-trip arbitrary sequence numbers and counts.
func TestBatchHeader(t *testing.T) {
	f := func(seq uint64, length uint32) bool {
		encoded := encodeBatchHeader(nil, seq, int(length))
		decSeq, decLength, err := decodeBatchHeader(encoded)
		return err == nil && decSeq == seq && decLength == int(length)
	}
	config := &quick.Config{
		Rand: testutil.NewRand(),
	}
	if err := quick.Check(f, config); err != nil {
		t.Error(err)
	}
}
||||
|
|
||||
|
// batchKV is the test-side record of one operation appended to a batch,
// used as the expected value when replaying.
type batchKV struct {
	kt   keyType
	k, v []byte
}
||||
|
|
||||
|
// TestBatch quick-checks Batch operations against a shadow record list:
// Put/Delete bookkeeping (Len, internalLen), replayInternal, append,
// Reset, Dump/Load round-trips, and Replay into a fresh batch.
func TestBatch(t *testing.T) {
	var (
		kvs         []batchKV // shadow copy of every appended record
		internalLen int       // expected Batch.internalLen
	)
	batch := new(Batch)  // receives every operation
	rbatch := new(Batch) // periodically appended onto abatch, then reset
	abatch := new(Batch) // accumulates rbatch via Batch.append
	// testBatch verifies record i against the shadow list.
	testBatch := func(i int, kt keyType, k, v []byte) error {
		kv := kvs[i]
		if kv.kt != kt {
			return fmt.Errorf("invalid key type, index=%d: %d vs %d", i, kv.kt, kt)
		}
		if !bytes.Equal(kv.k, k) {
			return fmt.Errorf("invalid key, index=%d", i)
		}
		if !bytes.Equal(kv.v, v) {
			return fmt.Errorf("invalid value, index=%d", i)
		}
		return nil
	}
	f := func(ktr uint8, k, v []byte) bool {
		kt := keyType(ktr % 2)
		if kt == keyTypeVal {
			batch.Put(k, v)
			rbatch.Put(k, v)
			kvs = append(kvs, batchKV{kt: kt, k: k, v: v})
			internalLen += len(k) + len(v) + 8
		} else {
			batch.Delete(k)
			rbatch.Delete(k)
			kvs = append(kvs, batchKV{kt: kt, k: k})
			internalLen += len(k) + 8
		}
		if batch.Len() != len(kvs) {
			t.Logf("batch.Len: %d vs %d", len(kvs), batch.Len())
			return false
		}
		if batch.internalLen != internalLen {
			t.Logf("abatch.internalLen: %d vs %d", internalLen, batch.internalLen)
			return false
		}
		// Every 1000 records: full replay check, append/Reset check and a
		// Dump/Load round-trip check.
		if len(kvs)%1000 == 0 {
			if err := batch.replayInternal(testBatch); err != nil {
				t.Logf("batch.replayInternal: %v", err)
				return false
			}

			abatch.append(rbatch)
			rbatch.Reset()
			if abatch.Len() != len(kvs) {
				t.Logf("abatch.Len: %d vs %d", len(kvs), abatch.Len())
				return false
			}
			if abatch.internalLen != internalLen {
				t.Logf("abatch.internalLen: %d vs %d", internalLen, abatch.internalLen)
				return false
			}
			if err := abatch.replayInternal(testBatch); err != nil {
				t.Logf("abatch.replayInternal: %v", err)
				return false
			}

			nbatch := new(Batch)
			if err := nbatch.Load(batch.Dump()); err != nil {
				t.Logf("nbatch.Load: %v", err)
				return false
			}
			if nbatch.Len() != len(kvs) {
				t.Logf("nbatch.Len: %d vs %d", len(kvs), nbatch.Len())
				return false
			}
			if nbatch.internalLen != internalLen {
				t.Logf("nbatch.internalLen: %d vs %d", internalLen, nbatch.internalLen)
				return false
			}
			if err := nbatch.replayInternal(testBatch); err != nil {
				t.Logf("nbatch.replayInternal: %v", err)
				return false
			}
		}
		// Every 10000 records: Replay into a fresh batch (a *Batch also
		// satisfies BatchReplay) and verify the copy.
		if len(kvs)%10000 == 0 {
			nbatch := new(Batch)
			if err := batch.Replay(nbatch); err != nil {
				t.Logf("batch.Replay: %v", err)
				return false
			}
			if nbatch.Len() != len(kvs) {
				t.Logf("nbatch.Len: %d vs %d", len(kvs), nbatch.Len())
				return false
			}
			if nbatch.internalLen != internalLen {
				t.Logf("nbatch.internalLen: %d vs %d", internalLen, nbatch.internalLen)
				return false
			}
			if err := nbatch.replayInternal(testBatch); err != nil {
				t.Logf("nbatch.replayInternal: %v", err)
				return false
			}
		}
		return true
	}
	config := &quick.Config{
		MaxCount: 40000,
		Rand:     testutil.NewRand(),
	}
	if err := quick.Check(f, config); err != nil {
		t.Error(err)
	}
	t.Logf("length=%d internalLen=%d", len(kvs), internalLen)
}
@ -0,0 +1,507 @@ |
|||||
|
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
|
||||
|
// All rights reserved.
|
||||
|
//
|
||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||
|
// found in the LICENSE file.
|
||||
|
|
||||
|
package leveldb |
||||
|
|
||||
|
import ( |
||||
|
"bytes" |
||||
|
"fmt" |
||||
|
"math/rand" |
||||
|
"os" |
||||
|
"path/filepath" |
||||
|
"runtime" |
||||
|
"sync/atomic" |
||||
|
"testing" |
||||
|
|
||||
|
"github.com/syndtr/goleveldb/leveldb/iterator" |
||||
|
"github.com/syndtr/goleveldb/leveldb/opt" |
||||
|
"github.com/syndtr/goleveldb/leveldb/storage" |
||||
|
) |
||||
|
|
||||
|
func randomString(r *rand.Rand, n int) []byte { |
||||
|
b := new(bytes.Buffer) |
||||
|
for i := 0; i < n; i++ { |
||||
|
b.WriteByte(' ' + byte(r.Intn(95))) |
||||
|
} |
||||
|
return b.Bytes() |
||||
|
} |
||||
|
|
||||
|
// compressibleStr produces n bytes built by repeating a random fragment
// of length n*frac, so the result compresses to roughly frac of its size.
func compressibleStr(r *rand.Rand, frac float32, n int) []byte {
	nn := int(float32(n) * frac)
	rb := randomString(r, nn)
	b := make([]byte, 0, n+nn)
	for len(b) < n {
		b = append(b, rb...)
	}
	return b[:n]
}
||||
|
|
||||
|
// valueGen hands out fixed-size chunks from a pre-generated compressible
// byte source, wrapping around when exhausted.
type valueGen struct {
	src []byte // pre-generated source bytes
	pos int    // read cursor into src
}
||||
|
|
||||
|
// newValueGen builds a valueGen with at least 1 MiB of source data whose
// compressibility is controlled by frac. The fixed seed keeps runs
// reproducible.
func newValueGen(frac float32) *valueGen {
	v := new(valueGen)
	r := rand.New(rand.NewSource(301))
	v.src = make([]byte, 0, 1048576+100)
	for len(v.src) < 1048576 {
		v.src = append(v.src, compressibleStr(r, frac, 100)...)
	}
	return v
}
||||
|
|
||||
|
func (v *valueGen) get(n int) []byte { |
||||
|
if v.pos+n > len(v.src) { |
||||
|
v.pos = 0 |
||||
|
} |
||||
|
v.pos += n |
||||
|
return v.src[v.pos-n : v.pos] |
||||
|
} |
||||
|
|
||||
|
// benchDB is the on-disk path shared by all DB benchmarks; it embeds the
// uid so different users of a shared temp dir do not collide.
var benchDB = filepath.Join(os.TempDir(), fmt.Sprintf("goleveldbbench-%d", os.Getuid()))
||||
|
|
||||
|
// dbBench bundles the state shared by the DB benchmarks: the open
// database, its storage, options, and the pre-generated workload.
type dbBench struct {
	b    *testing.B
	stor storage.Storage
	db   *DB

	o  *opt.Options
	ro *opt.ReadOptions
	wo *opt.WriteOptions

	// keys[i] pairs with values[i]; filled by populate.
	keys, values [][]byte
}
||||
|
|
||||
|
// openDBBench removes any leftover benchmark database, then opens a
// fresh one at benchDB. noCompress disables table compression.
// Failures abort the benchmark via b.Fatal.
func openDBBench(b *testing.B, noCompress bool) *dbBench {
	_, err := os.Stat(benchDB)
	if err == nil {
		err = os.RemoveAll(benchDB)
		if err != nil {
			b.Fatal("cannot remove old db: ", err)
		}
	}

	p := &dbBench{
		b:  b,
		o:  &opt.Options{},
		ro: &opt.ReadOptions{},
		wo: &opt.WriteOptions{},
	}
	p.stor, err = storage.OpenFile(benchDB, false)
	if err != nil {
		b.Fatal("cannot open stor: ", err)
	}
	if noCompress {
		p.o.Compression = opt.NoCompression
	}

	p.db, err = Open(p.stor, p.o)
	if err != nil {
		b.Fatal("cannot open db: ", err)
	}

	return p
}
||||
|
|
||||
|
// reopen closes and reopens the database on the same storage with the
// same options.
func (p *dbBench) reopen() {
	p.db.Close()
	var err error
	p.db, err = Open(p.stor, p.o)
	if err != nil {
		p.b.Fatal("Reopen: got error: ", err)
	}
}
||||
|
|
||||
|
// populate generates n workload pairs: 16-byte zero-padded sequential
// keys and 100-byte half-compressible values.
func (p *dbBench) populate(n int) {
	p.keys, p.values = make([][]byte, n), make([][]byte, n)
	v := newValueGen(0.5)
	for i := range p.keys {
		p.keys[i], p.values[i] = []byte(fmt.Sprintf("%016d", i)), v.get(100)
	}
}
||||
|
|
||||
|
func (p *dbBench) randomize() { |
||||
|
m := len(p.keys) |
||||
|
times := m * 2 |
||||
|
r1, r2 := rand.New(rand.NewSource(0xdeadbeef)), rand.New(rand.NewSource(0xbeefface)) |
||||
|
for n := 0; n < times; n++ { |
||||
|
i, j := r1.Int()%m, r2.Int()%m |
||||
|
if i == j { |
||||
|
continue |
||||
|
} |
||||
|
p.keys[i], p.keys[j] = p.keys[j], p.keys[i] |
||||
|
p.values[i], p.values[j] = p.values[j], p.values[i] |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
// writes times db.Write over the whole workload split into batches of
// perBatch records each. Batch construction happens before the timer
// starts so only the writes are measured.
func (p *dbBench) writes(perBatch int) {
	b := p.b
	db := p.db

	n := len(p.keys)
	m := n / perBatch
	if n%perBatch > 0 {
		m++
	}
	batches := make([]Batch, m)
	j := 0
	for i := range batches {
		first := true
		for ; j < n && ((j+1)%perBatch != 0 || first); j++ {
			first = false
			batches[i].Put(p.keys[j], p.values[j])
		}
	}
	runtime.GC()

	b.ResetTimer()
	b.StartTimer()
	for i := range batches {
		err := db.Write(&(batches[i]), p.wo)
		if err != nil {
			b.Fatal("write failed: ", err)
		}
	}
	b.StopTimer()
	// 116 = 16-byte key + 100-byte value per record (see populate).
	b.SetBytes(116)
}
||||
|
|
||||
|
// gc drops the workload slices and forces a collection so their memory
// does not skew subsequent measurements.
func (p *dbBench) gc() {
	p.keys, p.values = nil, nil
	runtime.GC()
}
||||
|
|
||||
|
// puts times individual db.Put calls over the whole workload.
func (p *dbBench) puts() {
	b := p.b
	db := p.db

	b.ResetTimer()
	b.StartTimer()
	for i := range p.keys {
		err := db.Put(p.keys[i], p.values[i], p.wo)
		if err != nil {
			b.Fatal("put failed: ", err)
		}
	}
	b.StopTimer()
	// 116 = 16-byte key + 100-byte value per record (see populate).
	b.SetBytes(116)
}
||||
|
|
||||
|
// fill loads the whole workload into the database, untimed, using one
// reusable batch of up to 10000 records per Write.
func (p *dbBench) fill() {
	b := p.b
	db := p.db

	perBatch := 10000
	batch := new(Batch)
	for i, n := 0, len(p.keys); i < n; {
		first := true
		for ; i < n && ((i+1)%perBatch != 0 || first); i++ {
			first = false
			batch.Put(p.keys[i], p.values[i])
		}
		err := db.Write(batch, p.wo)
		if err != nil {
			b.Fatal("write failed: ", err)
		}
		batch.Reset()
	}
}
||||
|
|
||||
|
// gets times a db.Get for every workload key, in slice order.
func (p *dbBench) gets() {
	b := p.b
	db := p.db

	b.ResetTimer()
	for i := range p.keys {
		_, err := db.Get(p.keys[i], p.ro)
		if err != nil {
			b.Error("got error: ", err)
		}
	}
	b.StopTimer()
}
||||
|
|
||||
|
// seeks times an iterator Seek for every workload key, in slice order.
func (p *dbBench) seeks() {
	b := p.b

	iter := p.newIter()
	defer iter.Release()
	b.ResetTimer()
	for i := range p.keys {
		if !iter.Seek(p.keys[i]) {
			b.Error("value not found for: ", string(p.keys[i]))
		}
	}
	b.StopTimer()
}
||||
|
|
||||
|
// newIter returns a full-range iterator over the database, aborting the
// benchmark if the iterator was created in an error state.
func (p *dbBench) newIter() iterator.Iterator {
	iter := p.db.NewIterator(nil, p.ro)
	err := iter.Error()
	if err != nil {
		p.b.Fatal("cannot create iterator: ", err)
	}
	return iter
}
||||
|
|
||||
|
// close logs block-pool stats (best effort), closes the database and its
// storage, removes the on-disk files, and releases workload memory.
func (p *dbBench) close() {
	if bp, err := p.db.GetProperty("leveldb.blockpool"); err == nil {
		p.b.Log("Block pool stats: ", bp)
	}
	p.db.Close()
	p.stor.Close()
	os.RemoveAll(benchDB)
	p.db = nil
	p.keys = nil
	p.values = nil
	runtime.GC()
}
||||
|
|
||||
|
// BenchmarkDBWrite measures sequential writes, one record per batch.
func BenchmarkDBWrite(b *testing.B) {
	p := openDBBench(b, false)
	p.populate(b.N)
	p.writes(1)
	p.close()
}
||||
|
|
||||
|
// BenchmarkDBWriteBatch measures sequential writes, 1000 records per batch.
func BenchmarkDBWriteBatch(b *testing.B) {
	p := openDBBench(b, false)
	p.populate(b.N)
	p.writes(1000)
	p.close()
}
||||
|
|
||||
|
// BenchmarkDBWriteUncompressed measures sequential single-record writes
// with table compression disabled.
func BenchmarkDBWriteUncompressed(b *testing.B) {
	p := openDBBench(b, true)
	p.populate(b.N)
	p.writes(1)
	p.close()
}
||||
|
|
||||
|
// BenchmarkDBWriteBatchUncompressed measures 1000-record batched writes
// with table compression disabled.
func BenchmarkDBWriteBatchUncompressed(b *testing.B) {
	p := openDBBench(b, true)
	p.populate(b.N)
	p.writes(1000)
	p.close()
}
||||
|
|
||||
|
// BenchmarkDBWriteRandom measures single-record writes in shuffled key order.
func BenchmarkDBWriteRandom(b *testing.B) {
	p := openDBBench(b, false)
	p.populate(b.N)
	p.randomize()
	p.writes(1)
	p.close()
}
||||
|
|
||||
|
// BenchmarkDBWriteRandomSync measures single-record writes with the
// write option Sync enabled.
func BenchmarkDBWriteRandomSync(b *testing.B) {
	p := openDBBench(b, false)
	p.wo.Sync = true
	p.populate(b.N)
	p.writes(1)
	p.close()
}
||||
|
|
||||
|
// BenchmarkDBOverwrite writes the workload twice, timing the overwrite pass.
func BenchmarkDBOverwrite(b *testing.B) {
	p := openDBBench(b, false)
	p.populate(b.N)
	p.writes(1)
	p.writes(1)
	p.close()
}
||||
|
|
||||
|
// BenchmarkDBOverwriteRandom overwrites the workload in shuffled order.
func BenchmarkDBOverwriteRandom(b *testing.B) {
	p := openDBBench(b, false)
	p.populate(b.N)
	p.writes(1)
	p.randomize()
	p.writes(1)
	p.close()
}
||||
|
|
||||
|
// BenchmarkDBPut measures individual db.Put calls (no batching).
func BenchmarkDBPut(b *testing.B) {
	p := openDBBench(b, false)
	p.populate(b.N)
	p.puts()
	p.close()
}
||||
|
|
||||
|
// BenchmarkDBRead measures a full forward iterator scan over a
// pre-filled database.
func BenchmarkDBRead(b *testing.B) {
	p := openDBBench(b, false)
	p.populate(b.N)
	p.fill()
	p.gc()

	iter := p.newIter()
	b.ResetTimer()
	for iter.Next() {
	}
	iter.Release()
	b.StopTimer()
	b.SetBytes(116)
	p.close()
}
||||
|
|
||||
|
// BenchmarkDBReadGC is BenchmarkDBRead without the gc() call, so the
// populated keys/values stay live during the scan.
func BenchmarkDBReadGC(b *testing.B) {
	p := openDBBench(b, false)
	p.populate(b.N)
	p.fill()

	iter := p.newIter()
	b.ResetTimer()
	for iter.Next() {
	}
	iter.Release()
	b.StopTimer()
	b.SetBytes(116)
	p.close()
}
||||
|
|
||||
|
// BenchmarkDBReadUncompressed measures a full forward scan with table
// compression disabled.
func BenchmarkDBReadUncompressed(b *testing.B) {
	p := openDBBench(b, true)
	p.populate(b.N)
	p.fill()
	p.gc()

	iter := p.newIter()
	b.ResetTimer()
	for iter.Next() {
	}
	iter.Release()
	b.StopTimer()
	b.SetBytes(116)
	p.close()
}
||||
|
|
||||
|
// BenchmarkDBReadTable measures a full forward scan after reopening the
// database.
func BenchmarkDBReadTable(b *testing.B) {
	p := openDBBench(b, false)
	p.populate(b.N)
	p.fill()
	p.reopen()
	p.gc()

	iter := p.newIter()
	b.ResetTimer()
	for iter.Next() {
	}
	iter.Release()
	b.StopTimer()
	b.SetBytes(116)
	p.close()
}
||||
|
|
||||
|
// BenchmarkDBReadReverse measures a full backward scan (Last then Prev).
func BenchmarkDBReadReverse(b *testing.B) {
	p := openDBBench(b, false)
	p.populate(b.N)
	p.fill()
	p.gc()

	iter := p.newIter()
	b.ResetTimer()
	iter.Last()
	for iter.Prev() {
	}
	iter.Release()
	b.StopTimer()
	b.SetBytes(116)
	p.close()
}
||||
|
|
||||
|
// BenchmarkDBReadReverseTable measures a full backward scan after
// reopening the database.
func BenchmarkDBReadReverseTable(b *testing.B) {
	p := openDBBench(b, false)
	p.populate(b.N)
	p.fill()
	p.reopen()
	p.gc()

	iter := p.newIter()
	b.ResetTimer()
	iter.Last()
	for iter.Prev() {
	}
	iter.Release()
	b.StopTimer()
	b.SetBytes(116)
	p.close()
}
||||
|
|
||||
|
// BenchmarkDBSeek measures iterator seeks in sequential key order.
func BenchmarkDBSeek(b *testing.B) {
	p := openDBBench(b, false)
	p.populate(b.N)
	p.fill()
	p.seeks()
	p.close()
}
||||
|
|
||||
|
// BenchmarkDBSeekRandom measures iterator seeks in shuffled key order.
func BenchmarkDBSeekRandom(b *testing.B) {
	p := openDBBench(b, false)
	p.populate(b.N)
	p.fill()
	p.randomize()
	p.seeks()
	p.close()
}
||||
|
|
||||
|
// BenchmarkDBGet measures point lookups in sequential key order.
func BenchmarkDBGet(b *testing.B) {
	p := openDBBench(b, false)
	p.populate(b.N)
	p.fill()
	p.gets()
	p.close()
}
||||
|
|
||||
|
// BenchmarkDBGetRandom measures point lookups in shuffled key order.
func BenchmarkDBGetRandom(b *testing.B) {
	p := openDBBench(b, false)
	p.populate(b.N)
	p.fill()
	p.randomize()
	p.gets()
	p.close()
}
||||
|
|
||||
|
// BenchmarkDBReadConcurrent measures parallel forward scans, one
// iterator per goroutine.
func BenchmarkDBReadConcurrent(b *testing.B) {
	p := openDBBench(b, false)
	p.populate(b.N)
	p.fill()
	p.gc()
	defer p.close()

	b.ResetTimer()
	b.SetBytes(116)

	b.RunParallel(func(pb *testing.PB) {
		iter := p.newIter()
		defer iter.Release()
		for pb.Next() && iter.Next() {
		}
	})
}
||||
|
|
||||
|
// BenchmarkDBReadConcurrent2 measures parallel scans where alternating
// goroutines iterate forward and backward.
func BenchmarkDBReadConcurrent2(b *testing.B) {
	p := openDBBench(b, false)
	p.populate(b.N)
	p.fill()
	p.gc()
	defer p.close()

	b.ResetTimer()
	b.SetBytes(116)

	// dir assigns each worker an alternating scan direction.
	var dir uint32
	b.RunParallel(func(pb *testing.PB) {
		iter := p.newIter()
		defer iter.Release()
		if atomic.AddUint32(&dir, 1)%2 == 0 {
			for pb.Next() && iter.Next() {
			}
		} else {
			if pb.Next() && iter.Last() {
				for pb.Next() && iter.Prev() {
				}
			}
		}
	})
}
@ -0,0 +1,29 @@ |
|||||
|
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
|
||||
|
// All rights reserved.
|
||||
|
//
|
||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||
|
// found in the LICENSE file.
|
||||
|
|
||||
|
package cache |
||||
|
|
||||
|
import ( |
||||
|
"math/rand" |
||||
|
"testing" |
||||
|
"time" |
||||
|
) |
||||
|
|
||||
|
// BenchmarkLRUCache measures concurrent Get (with insert-on-miss) on an
// LRU-backed cache, using a random key space ten times larger than the
// cache capacity to force evictions.
func BenchmarkLRUCache(b *testing.B) {
	c := NewCache(NewLRU(10000))

	b.SetParallelism(10)
	b.RunParallel(func(pb *testing.PB) {
		// Per-goroutine RNG; time-based seed makes key streams differ.
		r := rand.New(rand.NewSource(time.Now().UnixNano()))

		for pb.Next() {
			key := uint64(r.Intn(1000000))
			c.Get(0, key, func() (int, Value) {
				return 1, key
			}).Release()
		}
	})
}
@ -0,0 +1,705 @@ |
|||||
|
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
|
||||
|
// All rights reserved.
|
||||
|
//
|
||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||
|
// found in the LICENSE file.
|
||||
|
|
||||
|
// Package cache provides interface and implementation of a cache algorithms.
|
||||
|
package cache |
||||
|
|
||||
|
import ( |
||||
|
"sync" |
||||
|
"sync/atomic" |
||||
|
"unsafe" |
||||
|
|
||||
|
"github.com/syndtr/goleveldb/leveldb/util" |
||||
|
) |
||||
|
|
||||
|
// Cacher provides an interface to implement a caching functionality.
// An implementation must be safe for concurrent use.
type Cacher interface {
	// Capacity returns cache capacity.
	Capacity() int

	// SetCapacity sets cache capacity.
	SetCapacity(capacity int)

	// Promote promotes the 'cache node'.
	Promote(n *Node)

	// Ban evicts the 'cache node' and prevents a subsequent 'promote'.
	Ban(n *Node)

	// Evict evicts the 'cache node'.
	Evict(n *Node)

	// EvictNS evicts every 'cache node' within the given namespace.
	EvictNS(ns uint64)

	// EvictAll evicts all 'cache node'.
	EvictAll()

	// Close closes the 'cache tree'.
	Close() error
}
||||
|
|
||||
|
// Value is a 'cacheable object'. It may implement util.Releaser; if
// so the Release method will be called once the object is released.
type Value interface{}
||||
|
|
||||
|
// NamespaceGetter provides a convenient wrapper that binds a Cache to a
// fixed namespace.
type NamespaceGetter struct {
	Cache *Cache
	NS    uint64
}
||||
|
|
||||
|
// Get simply calls Cache.Get() method, supplying the wrapper's fixed
// namespace.
func (g *NamespaceGetter) Get(key uint64, setFunc func() (size int, value Value)) *Handle {
	return g.Cache.Get(g.NS, key, setFunc)
}
||||
|
|
||||
|
// The hash tables implementation is based on:
|
||||
|
// "Dynamic-Sized Nonblocking Hash Tables", by Yujie Liu,
|
||||
|
// Kunlong Zhang, and Michael Spear.
|
||||
|
// ACM Symposium on Principles of Distributed Computing, Jul 2014.
|
||||
|
|
||||
|
const (
	// mInitialSize is the initial (and minimum) number of hash buckets.
	mInitialSize = 1 << 4
	// mOverflowThreshold is the bucket length beyond which inserts bump
	// the table's overflow counter.
	mOverflowThreshold = 1 << 5
	// mOverflowGrowThreshold is the overflow-counter value that triggers
	// growing the table.
	mOverflowGrowThreshold = 1 << 7
)
||||
|
|
||||
|
// mBucket is a single hash bucket: a mutex-guarded slice of nodes.
type mBucket struct {
	mu   sync.Mutex
	node []*Node
	// frozen marks the bucket read-only; get/delete on a frozen bucket
	// return done=false so the caller retries (presumably against the
	// successor table — confirm against the resize path).
	frozen bool
}
||||
|
|
||||
|
func (b *mBucket) freeze() []*Node { |
||||
|
b.mu.Lock() |
||||
|
defer b.mu.Unlock() |
||||
|
if !b.frozen { |
||||
|
b.frozen = true |
||||
|
} |
||||
|
return b.node |
||||
|
} |
||||
|
|
||||
|
// get looks up the node (ns, key) in this bucket, incrementing its ref
// count when found. When absent and noset is false, a new node is
// created and inserted; added reports whether that happened. A frozen
// bucket returns done=false, telling the caller to retry. Inserting may
// kick off an asynchronous table grow.
func (b *mBucket) get(r *Cache, h *mNode, hash uint32, ns, key uint64, noset bool) (done, added bool, n *Node) {
	b.mu.Lock()

	// Frozen bucket: zero values mean "retry".
	if b.frozen {
		b.mu.Unlock()
		return
	}

	// Scan the node.
	for _, n := range b.node {
		if n.hash == hash && n.ns == ns && n.key == key {
			atomic.AddInt32(&n.ref, 1)
			b.mu.Unlock()
			return true, false, n
		}
	}

	// Get only.
	if noset {
		b.mu.Unlock()
		return true, false, nil
	}

	// Create node.
	n = &Node{
		r:    r,
		hash: hash,
		ns:   ns,
		key:  key,
		ref:  1,
	}
	// Add node to bucket.
	b.node = append(b.node, n)
	bLen := len(b.node)
	b.mu.Unlock()

	// Update counter. Growth triggers on total node count, or on the
	// overflow counter once this bucket is past mOverflowThreshold.
	grow := atomic.AddInt32(&r.nodes, 1) >= h.growThreshold
	if bLen > mOverflowThreshold {
		grow = grow || atomic.AddInt32(&h.overflow, 1) >= mOverflowGrowThreshold
	}

	// Grow. resizeInProgess CAS ensures only one resize starts; the new
	// table doubles the bucket count and is populated asynchronously.
	if grow && atomic.CompareAndSwapInt32(&h.resizeInProgess, 0, 1) {
		nhLen := len(h.buckets) << 1
		nh := &mNode{
			buckets:         make([]unsafe.Pointer, nhLen),
			mask:            uint32(nhLen) - 1,
			pred:            unsafe.Pointer(h),
			growThreshold:   int32(nhLen * mOverflowThreshold),
			shrinkThreshold: int32(nhLen >> 1),
		}
		ok := atomic.CompareAndSwapPointer(&r.mHead, unsafe.Pointer(h), unsafe.Pointer(nh))
		if !ok {
			panic("BUG: failed swapping head")
		}
		go nh.initBuckets()
	}

	return true, true, n
}
||||
|
|
||||
|
// delete removes the node matching (ns, key) from the bucket, but only if
// its ref count is zero. done is false when the bucket is frozen by a
// concurrent resize and the caller must retry on the new table head.
func (b *mBucket) delete(r *Cache, h *mNode, hash uint32, ns, key uint64) (done, deleted bool) {
	b.mu.Lock()

	if b.frozen {
		b.mu.Unlock()
		return
	}

	// Scan the node.
	var (
		n    *Node
		bLen int
	)
	for i := range b.node {
		n = b.node[i]
		if n.ns == ns && n.key == key {
			// Only delete when no outstanding handle references the node.
			if atomic.LoadInt32(&n.ref) == 0 {
				deleted = true

				// Call releaser.
				if n.value != nil {
					if r, ok := n.value.(util.Releaser); ok {
						r.Release()
					}
					n.value = nil
				}

				// Remove node from bucket.
				b.node = append(b.node[:i], b.node[i+1:]...)
				bLen = len(b.node)
			}
			break
		}
	}
	b.mu.Unlock()

	if deleted {
		// Call OnDel. Runs outside the bucket lock.
		for _, f := range n.onDel {
			f()
		}

		// Update counter.
		atomic.AddInt32(&r.size, int32(n.size)*-1)
		shrink := atomic.AddInt32(&r.nodes, -1) < h.shrinkThreshold
		if bLen >= mOverflowThreshold {
			atomic.AddInt32(&h.overflow, -1)
		}

		// Shrink. Mirror of the grow path in get: one goroutine installs
		// the halved successor table, never below the initial size.
		if shrink && len(h.buckets) > mInitialSize && atomic.CompareAndSwapInt32(&h.resizeInProgess, 0, 1) {
			nhLen := len(h.buckets) >> 1
			nh := &mNode{
				buckets:         make([]unsafe.Pointer, nhLen),
				mask:            uint32(nhLen) - 1,
				pred:            unsafe.Pointer(h),
				growThreshold:   int32(nhLen * mOverflowThreshold),
				shrinkThreshold: int32(nhLen >> 1),
			}
			ok := atomic.CompareAndSwapPointer(&r.mHead, unsafe.Pointer(h), unsafe.Pointer(nh))
			if !ok {
				panic("BUG: failed swapping head")
			}
			go nh.initBuckets()
		}
	}

	return true, deleted
}
||||
|
|
||||
|
// mNode is one fixed-size generation of the hash table. A resize installs
// a new mNode whose pred points at the old one; buckets are migrated
// lazily by initBucket.
type mNode struct {
	buckets []unsafe.Pointer // []*mBucket; nil entries not yet migrated
	mask    uint32           // len(buckets)-1, applied to node hashes
	pred    unsafe.Pointer   // *mNode; predecessor table, nil once fully migrated
	// CAS guard so only one goroutine installs a successor table.
	// (Field name keeps the original's "Progess" spelling; used elsewhere.)
	resizeInProgess int32

	overflow        int32 // count of entries past mOverflowThreshold, atomic
	growThreshold   int32 // node count that triggers a grow
	shrinkThreshold int32 // node count that triggers a shrink
}
||||
|
|
||||
|
// initBucket lazily materializes bucket i of this table from its
// predecessor: on grow it takes the matching half of one old bucket, on
// shrink it merges two old buckets. Freezing the old bucket(s) forces
// concurrent writers there to retry against this table.
func (n *mNode) initBucket(i uint32) *mBucket {
	if b := (*mBucket)(atomic.LoadPointer(&n.buckets[i])); b != nil {
		return b
	}

	p := (*mNode)(atomic.LoadPointer(&n.pred))
	if p != nil {
		var node []*Node
		if n.mask > p.mask {
			// Grow.
			pb := (*mBucket)(atomic.LoadPointer(&p.buckets[i&p.mask]))
			if pb == nil {
				pb = p.initBucket(i & p.mask)
			}
			m := pb.freeze()
			// Split nodes: keep only entries whose hash maps to slot i
			// under the larger mask.
			for _, x := range m {
				if x.hash&n.mask == i {
					node = append(node, x)
				}
			}
		} else {
			// Shrink.
			pb0 := (*mBucket)(atomic.LoadPointer(&p.buckets[i]))
			if pb0 == nil {
				pb0 = p.initBucket(i)
			}
			pb1 := (*mBucket)(atomic.LoadPointer(&p.buckets[i+uint32(len(n.buckets))]))
			if pb1 == nil {
				pb1 = p.initBucket(i + uint32(len(n.buckets)))
			}
			m0 := pb0.freeze()
			m1 := pb1.freeze()
			// Merge nodes.
			node = make([]*Node, 0, len(m0)+len(m1))
			node = append(node, m0...)
			node = append(node, m1...)
		}
		b := &mBucket{node: node}
		// Publish; a concurrent initBucket may have won the race, in
		// which case fall through and return the winner below.
		if atomic.CompareAndSwapPointer(&n.buckets[i], nil, unsafe.Pointer(b)) {
			if len(node) > mOverflowThreshold {
				atomic.AddInt32(&n.overflow, int32(len(node)-mOverflowThreshold))
			}
			return b
		}
	}

	return (*mBucket)(atomic.LoadPointer(&n.buckets[i]))
}
||||
|
|
||||
|
func (n *mNode) initBuckets() { |
||||
|
for i := range n.buckets { |
||||
|
n.initBucket(uint32(i)) |
||||
|
} |
||||
|
atomic.StorePointer(&n.pred, nil) |
||||
|
} |
||||
|
|
||||
|
// Cache is a 'cache map'.
|
||||
|
// Cache is a 'cache map'.
type Cache struct {
	mu     sync.RWMutex   // guards closed; read-held during Get/Delete/Evict*
	mHead  unsafe.Pointer // *mNode; current hash-table head, swapped on resize
	nodes  int32          // number of live nodes, updated atomically
	size   int32          // sum of node sizes, updated atomically
	cacher Cacher         // optional eviction policy; may be nil
	closed bool           // set by Close/CloseWeak under mu
}
||||
|
|
||||
|
// NewCache creates a new 'cache map'. The cacher is optional and
|
||||
|
// may be nil.
|
||||
|
func NewCache(cacher Cacher) *Cache { |
||||
|
h := &mNode{ |
||||
|
buckets: make([]unsafe.Pointer, mInitialSize), |
||||
|
mask: mInitialSize - 1, |
||||
|
growThreshold: int32(mInitialSize * mOverflowThreshold), |
||||
|
shrinkThreshold: 0, |
||||
|
} |
||||
|
for i := range h.buckets { |
||||
|
h.buckets[i] = unsafe.Pointer(&mBucket{}) |
||||
|
} |
||||
|
r := &Cache{ |
||||
|
mHead: unsafe.Pointer(h), |
||||
|
cacher: cacher, |
||||
|
} |
||||
|
return r |
||||
|
} |
||||
|
|
||||
|
func (r *Cache) getBucket(hash uint32) (*mNode, *mBucket) { |
||||
|
h := (*mNode)(atomic.LoadPointer(&r.mHead)) |
||||
|
i := hash & h.mask |
||||
|
b := (*mBucket)(atomic.LoadPointer(&h.buckets[i])) |
||||
|
if b == nil { |
||||
|
b = h.initBucket(i) |
||||
|
} |
||||
|
return h, b |
||||
|
} |
||||
|
|
||||
|
func (r *Cache) delete(n *Node) bool { |
||||
|
for { |
||||
|
h, b := r.getBucket(n.hash) |
||||
|
done, deleted := b.delete(r, h, n.hash, n.ns, n.key) |
||||
|
if done { |
||||
|
return deleted |
||||
|
} |
||||
|
} |
||||
|
return false |
||||
|
} |
||||
|
|
||||
|
// Nodes returns number of 'cache node' in the map.
|
||||
|
func (r *Cache) Nodes() int { |
||||
|
return int(atomic.LoadInt32(&r.nodes)) |
||||
|
} |
||||
|
|
||||
|
// Size returns sums of 'cache node' size in the map.
|
||||
|
func (r *Cache) Size() int { |
||||
|
return int(atomic.LoadInt32(&r.size)) |
||||
|
} |
||||
|
|
||||
|
// Capacity returns cache capacity.
|
||||
|
func (r *Cache) Capacity() int { |
||||
|
if r.cacher == nil { |
||||
|
return 0 |
||||
|
} |
||||
|
return r.cacher.Capacity() |
||||
|
} |
||||
|
|
||||
|
// SetCapacity sets cache capacity.
|
||||
|
func (r *Cache) SetCapacity(capacity int) { |
||||
|
if r.cacher != nil { |
||||
|
r.cacher.SetCapacity(capacity) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
// Get gets 'cache node' with the given namespace and key.
// If cache node is not found and setFunc is not nil, Get will atomically creates
// the 'cache node' by calling setFunc. Otherwise Get will returns nil.
//
// The returned 'cache handle' should be released after use by calling Release
// method.
func (r *Cache) Get(ns, key uint64, setFunc func() (size int, value Value)) *Handle {
	r.mu.RLock()
	defer r.mu.RUnlock()
	if r.closed {
		return nil
	}

	hash := murmur32(ns, key, 0xf00)
	for {
		h, b := r.getBucket(hash)
		// done is false when the bucket is frozen by a resize: retry.
		done, _, n := b.get(r, h, hash, ns, key, setFunc == nil)
		if done {
			if n != nil {
				n.mu.Lock()
				if n.value == nil {
					// Node exists but carries no value yet (or setFunc
					// previously produced nil).
					if setFunc == nil {
						n.mu.Unlock()
						n.unref()
						return nil
					}

					n.size, n.value = setFunc()
					if n.value == nil {
						// setFunc declined; drop our reference so the
						// empty node can be deleted.
						n.size = 0
						n.mu.Unlock()
						n.unref()
						return nil
					}
					atomic.AddInt32(&r.size, int32(n.size))
				}
				n.mu.Unlock()
				if r.cacher != nil {
					r.cacher.Promote(n)
				}
				return &Handle{unsafe.Pointer(n)}
			}

			// Lookup-only miss (noset path returned no node).
			break
		}
	}
	return nil
}
||||
|
|
||||
|
// Delete removes and ban 'cache node' with the given namespace and key.
// A banned 'cache node' will never inserted into the 'cache tree'. Ban
// only attributed to the particular 'cache node', so when a 'cache node'
// is recreated it will not be banned.
//
// If onDel is not nil, then it will be executed if such 'cache node'
// doesn't exist or once the 'cache node' is released.
//
// Delete return true is such 'cache node' exist.
func (r *Cache) Delete(ns, key uint64, onDel func()) bool {
	r.mu.RLock()
	defer r.mu.RUnlock()
	if r.closed {
		return false
	}

	hash := murmur32(ns, key, 0xf00)
	for {
		h, b := r.getBucket(hash)
		// Lookup only (noset=true); retry while the bucket is frozen.
		done, _, n := b.get(r, h, hash, ns, key, true)
		if done {
			if n != nil {
				if onDel != nil {
					n.mu.Lock()
					n.onDel = append(n.onDel, onDel)
					n.mu.Unlock()
				}
				if r.cacher != nil {
					r.cacher.Ban(n)
				}
				// Drop the reference taken by get; actual removal happens
				// once all handles are released.
				n.unref()
				return true
			}

			break
		}
	}

	// Node does not exist: run the callback immediately, as documented.
	if onDel != nil {
		onDel()
	}

	return false
}
||||
|
|
||||
|
// Evict evicts 'cache node' with the given namespace and key. This will
// simply call Cacher.Evict.
//
// Evict return true is such 'cache node' exist.
func (r *Cache) Evict(ns, key uint64) bool {
	r.mu.RLock()
	defer r.mu.RUnlock()
	if r.closed {
		return false
	}

	hash := murmur32(ns, key, 0xf00)
	for {
		h, b := r.getBucket(hash)
		// Lookup only; retry while the bucket is frozen by a resize.
		done, _, n := b.get(r, h, hash, ns, key, true)
		if done {
			if n != nil {
				if r.cacher != nil {
					r.cacher.Evict(n)
				}
				// Drop the reference taken by get.
				n.unref()
				return true
			}

			break
		}
	}

	return false
}
||||
|
|
||||
|
// EvictNS evicts 'cache node' with the given namespace. This will
|
||||
|
// simply call Cacher.EvictNS.
|
||||
|
func (r *Cache) EvictNS(ns uint64) { |
||||
|
r.mu.RLock() |
||||
|
defer r.mu.RUnlock() |
||||
|
if r.closed { |
||||
|
return |
||||
|
} |
||||
|
|
||||
|
if r.cacher != nil { |
||||
|
r.cacher.EvictNS(ns) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
// EvictAll evicts all 'cache node'. This will simply call Cacher.EvictAll.
|
||||
|
func (r *Cache) EvictAll() { |
||||
|
r.mu.RLock() |
||||
|
defer r.mu.RUnlock() |
||||
|
if r.closed { |
||||
|
return |
||||
|
} |
||||
|
|
||||
|
if r.cacher != nil { |
||||
|
r.cacher.EvictAll() |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
// Close closes the 'cache map' and forcefully releases all 'cache node'.
func (r *Cache) Close() error {
	r.mu.Lock()
	if !r.closed {
		r.closed = true

		// Materialize every bucket of the current head so the loop
		// below sees all live nodes.
		h := (*mNode)(r.mHead)
		h.initBuckets()

		for i := range h.buckets {
			b := (*mBucket)(h.buckets[i])
			for _, n := range b.node {
				// Call releaser.
				if n.value != nil {
					if r, ok := n.value.(util.Releaser); ok {
						r.Release()
					}
					n.value = nil
				}

				// Call OnDel.
				for _, f := range n.onDel {
					f()
				}
				n.onDel = nil
			}
		}
	}
	r.mu.Unlock()

	// Avoid deadlock: close the cacher outside r.mu.
	if r.cacher != nil {
		if err := r.cacher.Close(); err != nil {
			return err
		}
	}
	return nil
}
||||
|
|
||||
|
// CloseWeak closes the 'cache map' and evict all 'cache node' from cacher, but
|
||||
|
// unlike Close it doesn't forcefully releases 'cache node'.
|
||||
|
func (r *Cache) CloseWeak() error { |
||||
|
r.mu.Lock() |
||||
|
if !r.closed { |
||||
|
r.closed = true |
||||
|
} |
||||
|
r.mu.Unlock() |
||||
|
|
||||
|
// Avoid deadlock.
|
||||
|
if r.cacher != nil { |
||||
|
r.cacher.EvictAll() |
||||
|
if err := r.cacher.Close(); err != nil { |
||||
|
return err |
||||
|
} |
||||
|
} |
||||
|
return nil |
||||
|
} |
||||
|
|
||||
|
// Node is a 'cache node'.
type Node struct {
	r *Cache // owning cache, used on final unref

	hash    uint32 // murmur32(ns, key, 0xf00), fixed at creation
	ns, key uint64

	mu    sync.Mutex // guards size/value/onDel
	size  int
	value Value

	ref   int32    // handle reference count, updated atomically
	onDel []func() // callbacks run when the node is removed

	// CacheData is scratch space for the Cacher implementation.
	CacheData unsafe.Pointer
}
||||
|
|
||||
|
// NS returns this 'cache node' namespace.
|
||||
|
func (n *Node) NS() uint64 { |
||||
|
return n.ns |
||||
|
} |
||||
|
|
||||
|
// Key returns this 'cache node' key.
|
||||
|
func (n *Node) Key() uint64 { |
||||
|
return n.key |
||||
|
} |
||||
|
|
||||
|
// Size returns this 'cache node' size.
|
||||
|
func (n *Node) Size() int { |
||||
|
return n.size |
||||
|
} |
||||
|
|
||||
|
// Value returns this 'cache node' value.
|
||||
|
func (n *Node) Value() Value { |
||||
|
return n.value |
||||
|
} |
||||
|
|
||||
|
// Ref returns this 'cache node' ref counter.
|
||||
|
func (n *Node) Ref() int32 { |
||||
|
return atomic.LoadInt32(&n.ref) |
||||
|
} |
||||
|
|
||||
|
// GetHandle returns an handle for this 'cache node'.
|
||||
|
func (n *Node) GetHandle() *Handle { |
||||
|
if atomic.AddInt32(&n.ref, 1) <= 1 { |
||||
|
panic("BUG: Node.GetHandle on zero ref") |
||||
|
} |
||||
|
return &Handle{unsafe.Pointer(n)} |
||||
|
} |
||||
|
|
||||
|
func (n *Node) unref() { |
||||
|
if atomic.AddInt32(&n.ref, -1) == 0 { |
||||
|
n.r.delete(n) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
// unrefLocked drops one reference like unref, but takes the cache's read
// lock first so the node is only deleted while the cache is still open
// (Close holds the write lock and releases nodes itself).
func (n *Node) unrefLocked() {
	if atomic.AddInt32(&n.ref, -1) == 0 {
		n.r.mu.RLock()
		if !n.r.closed {
			n.r.delete(n)
		}
		n.r.mu.RUnlock()
	}
}
||||
|
|
||||
|
// Handle is a 'cache handle' of a 'cache node'.
type Handle struct {
	n unsafe.Pointer // *Node; CAS'd to nil by Release
}
||||
|
|
||||
|
// Value returns the value of the 'cache node'.
|
||||
|
func (h *Handle) Value() Value { |
||||
|
n := (*Node)(atomic.LoadPointer(&h.n)) |
||||
|
if n != nil { |
||||
|
return n.value |
||||
|
} |
||||
|
return nil |
||||
|
} |
||||
|
|
||||
|
// Release releases this 'cache handle'.
|
||||
|
// It is safe to call release multiple times.
|
||||
|
func (h *Handle) Release() { |
||||
|
nPtr := atomic.LoadPointer(&h.n) |
||||
|
if nPtr != nil && atomic.CompareAndSwapPointer(&h.n, nPtr, nil) { |
||||
|
n := (*Node)(nPtr) |
||||
|
n.unrefLocked() |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
// murmur32 hashes the (ns, key) pair into a 32-bit value using a
// MurmurHash2-style mix seeded with seed.
func murmur32(ns, key uint64, seed uint32) uint32 {
	const (
		m = uint32(0x5bd1e995)
		r = 24
	)

	// Split the two 64-bit inputs into four 32-bit words.
	words := [4]uint32{
		uint32(ns >> 32),
		uint32(ns),
		uint32(key >> 32),
		uint32(key),
	}

	h := seed
	for _, k := range words {
		// Mix the word, then fold it into the accumulator.
		k *= m
		k ^= k >> r
		k *= m

		h *= m
		h ^= k
	}

	// Final avalanche.
	h ^= h >> 13
	h *= m
	h ^= h >> 15

	return h
}
@ -0,0 +1,563 @@ |
|||||
|
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
|
||||
|
// All rights reserved.
|
||||
|
//
|
||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||
|
// found in the LICENSE file.
|
||||
|
|
||||
|
package cache |
||||
|
|
||||
|
import ( |
||||
|
"math/rand" |
||||
|
"runtime" |
||||
|
"sync" |
||||
|
"sync/atomic" |
||||
|
"testing" |
||||
|
"time" |
||||
|
"unsafe" |
||||
|
) |
||||
|
|
||||
|
// int32o is a ref-counted test object; acquire/Release panic when the
// counter leaves the expected 0 <-> 1 cycle, catching double-create or
// double-release bugs in the cache.
type int32o int32

// acquire increments the counter and panics unless it was previously 0.
func (o *int32o) acquire() {
	if atomic.AddInt32((*int32)(o), 1) != 1 {
		panic("BUG: invalid ref")
	}
}

// Release decrements the counter and panics unless it returns to 0.
func (o *int32o) Release() {
	if atomic.AddInt32((*int32)(o), -1) != 0 {
		panic("BUG: invalid ref")
	}
}
||||
|
|
||||
|
// releaserFunc wraps a value with a release callback; its Release method
// is picked up by the cache's util.Releaser type assertion on node values.
type releaserFunc struct {
	fn    func()
	value Value
}

// Release invokes the wrapped callback, if any.
func (r releaserFunc) Release() {
	if r.fn != nil {
		r.fn()
	}
}
||||
|
|
||||
|
// set inserts (ns, key) -> value with the given charge, optionally
// wrapping the value in a releaserFunc so relf runs when the node's
// value is released.
func set(c *Cache, ns, key uint64, value Value, charge int, relf func()) *Handle {
	return c.Get(ns, key, func() (int, Value) {
		if relf != nil {
			return charge, releaserFunc{relf, value}
		}
		return charge, value
	})
}
||||
|
|
||||
|
// cacheMapTestParams sizes one namespace's worth of work in TestCacheMap.
type cacheMapTestParams struct {
	nobjects, nhandles, concurrent, repeat int
}
||||
|
|
||||
|
// TestCacheMap stress-tests the concurrent cache map: per namespace,
// many goroutines Get/create ref-checked objects while background
// goroutines randomly release stashed handles, and an extra goroutine
// churns a separate namespace to force table resizes. At the end every
// object's ref count must be back to zero.
func TestCacheMap(t *testing.T) {
	runtime.GOMAXPROCS(runtime.NumCPU())

	var params []cacheMapTestParams
	if testing.Short() {
		params = []cacheMapTestParams{
			{1000, 100, 20, 3},
			{10000, 300, 50, 10},
		}
	} else {
		params = []cacheMapTestParams{
			{10000, 400, 50, 3},
			{100000, 1000, 100, 10},
		}
	}

	var (
		objects [][]int32o
		handles [][]unsafe.Pointer
	)

	for _, x := range params {
		objects = append(objects, make([]int32o, x.nobjects))
		handles = append(handles, make([]unsafe.Pointer, x.nhandles))
	}

	c := NewCache(nil)

	wg := new(sync.WaitGroup)
	var done int32

	for ns, x := range params {
		for i := 0; i < x.concurrent; i++ {
			wg.Add(1)
			go func(ns, i, repeat int, objects []int32o, handles []unsafe.Pointer) {
				defer wg.Done()
				r := rand.New(rand.NewSource(time.Now().UnixNano()))

				for j := len(objects) * repeat; j >= 0; j-- {
					key := uint64(r.Intn(len(objects)))
					h := c.Get(uint64(ns), key, func() (int, Value) {
						o := &objects[key]
						o.acquire() // panics if the object is created twice
						return 1, o
					})
					if v := h.Value().(*int32o); v != &objects[key] {
						t.Fatalf("#%d invalid value: want=%p got=%p", ns, &objects[key], v)
					}
					if objects[key] != 1 {
						t.Fatalf("#%d invalid object %d: %d", ns, key, objects[key])
					}
					// Stash the handle in a random slot for the releaser
					// goroutine; release immediately if the slot is taken.
					if !atomic.CompareAndSwapPointer(&handles[r.Intn(len(handles))], nil, unsafe.Pointer(h)) {
						h.Release()
					}
				}
			}(ns, i, x.repeat, objects[ns], handles[ns])
		}

		// Background releaser: randomly drains stashed handles until done.
		go func(handles []unsafe.Pointer) {
			r := rand.New(rand.NewSource(time.Now().UnixNano()))

			for atomic.LoadInt32(&done) == 0 {
				i := r.Intn(len(handles))
				h := (*Handle)(atomic.LoadPointer(&handles[i]))
				if h != nil && atomic.CompareAndSwapPointer(&handles[i], unsafe.Pointer(h), nil) {
					h.Release()
				}
				time.Sleep(time.Millisecond)
			}
		}(handles[ns])
	}

	// Churn a disjoint namespace to keep the table growing/shrinking.
	go func() {
		handles := make([]*Handle, 100000)
		for atomic.LoadInt32(&done) == 0 {
			for i := range handles {
				handles[i] = c.Get(999999999, uint64(i), func() (int, Value) {
					return 1, 1
				})
			}
			for _, h := range handles {
				h.Release()
			}
		}
	}()

	wg.Wait()

	atomic.StoreInt32(&done, 1)

	// Drain any handles the releaser goroutines did not get to.
	for _, handles0 := range handles {
		for i := range handles0 {
			h := (*Handle)(atomic.LoadPointer(&handles0[i]))
			if h != nil && atomic.CompareAndSwapPointer(&handles0[i], unsafe.Pointer(h), nil) {
				h.Release()
			}
		}
	}

	// Every object must have been released exactly as often as acquired.
	for ns, objects0 := range objects {
		for i, o := range objects0 {
			if o != 0 {
				t.Fatalf("invalid object #%d.%d: ref=%d", ns, i, o)
			}
		}
	}
}
||||
|
|
||||
|
func TestCacheMap_NodesAndSize(t *testing.T) { |
||||
|
c := NewCache(nil) |
||||
|
if c.Nodes() != 0 { |
||||
|
t.Errorf("invalid nodes counter: want=%d got=%d", 0, c.Nodes()) |
||||
|
} |
||||
|
if c.Size() != 0 { |
||||
|
t.Errorf("invalid size counter: want=%d got=%d", 0, c.Size()) |
||||
|
} |
||||
|
set(c, 0, 1, 1, 1, nil) |
||||
|
set(c, 0, 2, 2, 2, nil) |
||||
|
set(c, 1, 1, 3, 3, nil) |
||||
|
set(c, 2, 1, 4, 1, nil) |
||||
|
if c.Nodes() != 4 { |
||||
|
t.Errorf("invalid nodes counter: want=%d got=%d", 4, c.Nodes()) |
||||
|
} |
||||
|
if c.Size() != 7 { |
||||
|
t.Errorf("invalid size counter: want=%d got=%d", 4, c.Size()) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
// TestLRUCache_Capacity fills an LRU cache to its capacity of 10, then
// shrinks the capacity to 9 and checks that least-recently-used entries
// are evicted until the used size fits again.
func TestLRUCache_Capacity(t *testing.T) {
	c := NewCache(NewLRU(10))
	if c.Capacity() != 10 {
		t.Errorf("invalid capacity: want=%d got=%d", 10, c.Capacity())
	}
	// Total charge 1+2+3+1+1+1+1+1 = 11 > 10, so one entry is evicted.
	set(c, 0, 1, 1, 1, nil).Release()
	set(c, 0, 2, 2, 2, nil).Release()
	set(c, 1, 1, 3, 3, nil).Release()
	set(c, 2, 1, 4, 1, nil).Release()
	set(c, 2, 2, 5, 1, nil).Release()
	set(c, 2, 3, 6, 1, nil).Release()
	set(c, 2, 4, 7, 1, nil).Release()
	set(c, 2, 5, 8, 1, nil).Release()
	if c.Nodes() != 7 {
		t.Errorf("invalid nodes counter: want=%d got=%d", 7, c.Nodes())
	}
	if c.Size() != 10 {
		t.Errorf("invalid size counter: want=%d got=%d", 10, c.Size())
	}
	c.SetCapacity(9)
	if c.Capacity() != 9 {
		t.Errorf("invalid capacity: want=%d got=%d", 9, c.Capacity())
	}
	if c.Nodes() != 6 {
		t.Errorf("invalid nodes counter: want=%d got=%d", 6, c.Nodes())
	}
	if c.Size() != 8 {
		t.Errorf("invalid size counter: want=%d got=%d", 8, c.Size())
	}
}
||||
|
|
||||
|
// TestCacheMap_NilValue checks that a setFunc returning a nil value
// yields no handle and leaves the node/size counters untouched.
func TestCacheMap_NilValue(t *testing.T) {
	c := NewCache(NewLRU(10))
	h := c.Get(0, 0, func() (size int, value Value) {
		return 1, nil
	})
	if h != nil {
		t.Error("cache handle is non-nil")
	}
	if c.Nodes() != 0 {
		t.Errorf("invalid nodes counter: want=%d got=%d", 0, c.Nodes())
	}
	if c.Size() != 0 {
		t.Errorf("invalid size counter: want=%d got=%d", 0, c.Size())
	}
}
||||
|
|
||||
|
// TestLRUCache_GetLatency checks that slow setFunc calls (creators sleep
// for `delay`) do not block concurrent lookup-only Gets: the average
// lookup latency must stay well below the creation delay.
func TestLRUCache_GetLatency(t *testing.T) {
	runtime.GOMAXPROCS(runtime.NumCPU())

	const (
		concurrentSet = 30
		concurrentGet = 3
		duration      = 3 * time.Second
		delay         = 3 * time.Millisecond
		maxkey        = 100000
	)

	var (
		set, getHit, getAll        int32
		getMaxLatency, getDuration int64
	)

	c := NewCache(NewLRU(5000))
	wg := &sync.WaitGroup{}
	until := time.Now().Add(duration)
	// Setters: create random entries with an artificially slow setFunc.
	for i := 0; i < concurrentSet; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			r := rand.New(rand.NewSource(time.Now().UnixNano()))
			for time.Now().Before(until) {
				c.Get(0, uint64(r.Intn(maxkey)), func() (int, Value) {
					time.Sleep(delay)
					atomic.AddInt32(&set, 1)
					return 1, 1
				}).Release()
			}
		}(i)
	}
	// Getters: lookup-only (nil setFunc), tracking max/total latency.
	for i := 0; i < concurrentGet; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			r := rand.New(rand.NewSource(time.Now().UnixNano()))
			for {
				mark := time.Now()
				if mark.Before(until) {
					h := c.Get(0, uint64(r.Intn(maxkey)), nil)
					latency := int64(time.Now().Sub(mark))
					m := atomic.LoadInt64(&getMaxLatency)
					if latency > m {
						atomic.CompareAndSwapInt64(&getMaxLatency, m, latency)
					}
					atomic.AddInt64(&getDuration, latency)
					if h != nil {
						atomic.AddInt32(&getHit, 1)
						h.Release()
					}
					atomic.AddInt32(&getAll, 1)
				} else {
					break
				}
			}
		}(i)
	}

	wg.Wait()
	getAvglatency := time.Duration(getDuration) / time.Duration(getAll)
	t.Logf("set=%d getHit=%d getAll=%d getMaxLatency=%v getAvgLatency=%v",
		set, getHit, getAll, time.Duration(getMaxLatency), getAvglatency)

	if getAvglatency > delay/3 {
		t.Errorf("get avg latency > %v: got=%v", delay/3, getAvglatency)
	}
}
||||
|
|
||||
|
// TestLRUCache_HitMiss inserts entries one by one, checking after each
// insert that exactly the inserted prefix hits; it then deletes them one
// by one, checking the complementary hit/miss pattern and that every
// set-time releaser eventually fires.
func TestLRUCache_HitMiss(t *testing.T) {
	cases := []struct {
		key   uint64
		value string
	}{
		{1, "vvvvvvvvv"},
		{100, "v1"},
		{0, "v2"},
		{12346, "v3"},
		{777, "v4"},
		{999, "v5"},
		{7654, "v6"},
		{2, "v7"},
		{3, "v8"},
		{9, "v9"},
	}

	setfin := 0
	c := NewCache(NewLRU(1000))
	for i, x := range cases {
		set(c, 0, x.key, x.value, len(x.value), func() {
			setfin++
		}).Release()
		for j, y := range cases {
			h := c.Get(0, y.key, nil)
			if j <= i {
				// should hit
				if h == nil {
					t.Errorf("case '%d' iteration '%d' is miss", i, j)
				} else {
					if x := h.Value().(releaserFunc).value.(string); x != y.value {
						t.Errorf("case '%d' iteration '%d' has invalid value got '%s', want '%s'", i, j, x, y.value)
					}
				}
			} else {
				// should miss
				if h != nil {
					t.Errorf("case '%d' iteration '%d' is hit , value '%s'", i, j, h.Value().(releaserFunc).value.(string))
				}
			}
			if h != nil {
				h.Release()
			}
		}
	}

	for i, x := range cases {
		finalizerOk := false
		c.Delete(0, x.key, func() {
			finalizerOk = true
		})

		if !finalizerOk {
			t.Errorf("case %d delete finalizer not executed", i)
		}

		for j, y := range cases {
			h := c.Get(0, y.key, nil)
			if j > i {
				// should hit
				if h == nil {
					t.Errorf("case '%d' iteration '%d' is miss", i, j)
				} else {
					if x := h.Value().(releaserFunc).value.(string); x != y.value {
						t.Errorf("case '%d' iteration '%d' has invalid value got '%s', want '%s'", i, j, x, y.value)
					}
				}
			} else {
				// should miss
				if h != nil {
					t.Errorf("case '%d' iteration '%d' is hit, value '%s'", i, j, h.Value().(releaserFunc).value.(string))
				}
			}
			if h != nil {
				h.Release()
			}
		}
	}

	// Every insert's releaser must have run once all entries are deleted.
	if setfin != len(cases) {
		t.Errorf("some set finalizer may not be executed, want=%d got=%d", len(cases), setfin)
	}
}
||||
|
|
||||
|
// TestLRUCache_Eviction checks LRU ordering: a large insert (charge 10)
// pushes out older entries, but an entry pinned by an outstanding handle
// (o1, key 1) survives eviction until that handle is released.
func TestLRUCache_Eviction(t *testing.T) {
	c := NewCache(NewLRU(12))
	o1 := set(c, 0, 1, 1, 1, nil) // handle kept open: pins key 1
	set(c, 0, 2, 2, 1, nil).Release()
	set(c, 0, 3, 3, 1, nil).Release()
	set(c, 0, 4, 4, 1, nil).Release()
	set(c, 0, 5, 5, 1, nil).Release()
	if h := c.Get(0, 2, nil); h != nil { // 1,3,4,5,2
		h.Release()
	}
	set(c, 0, 9, 9, 10, nil).Release() // 5,2,9

	for _, key := range []uint64{9, 2, 5, 1} {
		h := c.Get(0, key, nil)
		if h == nil {
			t.Errorf("miss for key '%d'", key)
		} else {
			if x := h.Value().(int); x != int(key) {
				t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x)
			}
			h.Release()
		}
	}
	o1.Release() // unpin key 1; it stays cached until evicted
	for _, key := range []uint64{1, 2, 5} {
		h := c.Get(0, key, nil)
		if h == nil {
			t.Errorf("miss for key '%d'", key)
		} else {
			if x := h.Value().(int); x != int(key) {
				t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x)
			}
			h.Release()
		}
	}
	// Keys 3, 4 were evicted by the charge-10 insert; 9 was evicted by
	// the re-promotions above.
	for _, key := range []uint64{3, 4, 9} {
		h := c.Get(0, key, nil)
		if h != nil {
			t.Errorf("hit for key '%d'", key)
			if x := h.Value().(int); x != int(key) {
				t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x)
			}
			h.Release()
		}
	}
}
||||
|
|
||||
|
// TestLRUCache_Evict exercises Evict, EvictNS and EvictAll: each removes
// the targeted entries from the LRU so subsequent lookup-only Gets miss,
// and a second Evict of the same key reports false.
func TestLRUCache_Evict(t *testing.T) {
	c := NewCache(NewLRU(6))
	set(c, 0, 1, 1, 1, nil).Release()
	set(c, 0, 2, 2, 1, nil).Release()
	set(c, 1, 1, 4, 1, nil).Release()
	set(c, 1, 2, 5, 1, nil).Release()
	set(c, 2, 1, 6, 1, nil).Release()
	set(c, 2, 2, 7, 1, nil).Release()

	// All six entries fit the capacity and must be resident.
	for ns := 0; ns < 3; ns++ {
		for key := 1; key < 3; key++ {
			if h := c.Get(uint64(ns), uint64(key), nil); h != nil {
				h.Release()
			} else {
				t.Errorf("Cache.Get on #%d.%d return nil", ns, key)
			}
		}
	}

	if ok := c.Evict(0, 1); !ok {
		t.Error("first Cache.Evict on #0.1 return false")
	}
	if ok := c.Evict(0, 1); ok {
		t.Error("second Cache.Evict on #0.1 return true")
	}
	if h := c.Get(0, 1, nil); h != nil {
		t.Errorf("Cache.Get on #0.1 return non-nil: %v", h.Value())
	}

	c.EvictNS(1)
	if h := c.Get(1, 1, nil); h != nil {
		t.Errorf("Cache.Get on #1.1 return non-nil: %v", h.Value())
	}
	if h := c.Get(1, 2, nil); h != nil {
		t.Errorf("Cache.Get on #1.2 return non-nil: %v", h.Value())
	}

	c.EvictAll()
	for ns := 0; ns < 3; ns++ {
		for key := 1; key < 3; key++ {
			if h := c.Get(uint64(ns), uint64(key), nil); h != nil {
				t.Errorf("Cache.Get on #%d.%d return non-nil: %v", ns, key, h.Value())
			}
		}
	}
}
||||
|
|
||||
|
// TestLRUCache_Delete exercises Delete semantics: deleting bans and
// removes an unreferenced entry, deleting a missing entry returns false
// but still runs the callback, and an entry held by a handle (h2) stays
// reachable until that handle is released.
func TestLRUCache_Delete(t *testing.T) {
	delFuncCalled := 0
	delFunc := func() {
		delFuncCalled++
	}

	c := NewCache(NewLRU(2))
	set(c, 0, 1, 1, 1, nil).Release()
	set(c, 0, 2, 2, 1, nil).Release()

	if ok := c.Delete(0, 1, delFunc); !ok {
		t.Error("Cache.Delete on #1 return false")
	}
	if h := c.Get(0, 1, nil); h != nil {
		t.Errorf("Cache.Get on #1 return non-nil: %v", h.Value())
	}
	if ok := c.Delete(0, 1, delFunc); ok {
		t.Error("Cache.Delete on #1 return true")
	}

	// Hold a handle to #2 so deleting it only bans it from the LRU.
	h2 := c.Get(0, 2, nil)
	if h2 == nil {
		t.Error("Cache.Get on #2 return nil")
	}
	if ok := c.Delete(0, 2, delFunc); !ok {
		t.Error("(1) Cache.Delete on #2 return false")
	}
	if ok := c.Delete(0, 2, delFunc); !ok {
		t.Error("(2) Cache.Delete on #2 return false")
	}

	set(c, 0, 3, 3, 1, nil).Release()
	set(c, 0, 4, 4, 1, nil).Release()
	c.Get(0, 2, nil).Release()

	for key := 2; key <= 4; key++ {
		if h := c.Get(0, uint64(key), nil); h != nil {
			h.Release()
		} else {
			t.Errorf("Cache.Get on #%d return nil", key)
		}
	}

	// Releasing the last handle finally removes the banned #2.
	h2.Release()
	if h := c.Get(0, 2, nil); h != nil {
		t.Errorf("Cache.Get on #2 return non-nil: %v", h.Value())
	}

	if delFuncCalled != 4 {
		t.Errorf("delFunc isn't called 4 times: got=%d", delFuncCalled)
	}
}
||||
|
|
||||
|
// TestLRUCache_Close verifies that Close forcefully releases every node:
// all three set-time releasers fire, and the one pending delete callback
// (on #3, still held by h3) fires exactly once.
func TestLRUCache_Close(t *testing.T) {
	relFuncCalled := 0
	relFunc := func() {
		relFuncCalled++
	}
	delFuncCalled := 0
	delFunc := func() {
		delFuncCalled++
	}

	c := NewCache(NewLRU(2))
	set(c, 0, 1, 1, 1, relFunc).Release()
	set(c, 0, 2, 2, 1, relFunc).Release()

	// h3 is kept open across Close; the Delete only registers delFunc.
	h3 := set(c, 0, 3, 3, 1, relFunc)
	if h3 == nil {
		t.Error("Cache.Get on #3 return nil")
	}
	if ok := c.Delete(0, 3, delFunc); !ok {
		t.Error("Cache.Delete on #3 return false")
	}

	c.Close()

	if relFuncCalled != 3 {
		t.Errorf("relFunc isn't called 3 times: got=%d", relFuncCalled)
	}
	if delFuncCalled != 1 {
		t.Errorf("delFunc isn't called 1 times: got=%d", delFuncCalled)
	}
}
@ -0,0 +1,195 @@ |
|||||
|
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
|
||||
|
// All rights reserved.
|
||||
|
//
|
||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||
|
// found in the LICENSE file.
|
||||
|
|
||||
|
package cache |
||||
|
|
||||
|
import ( |
||||
|
"sync" |
||||
|
"unsafe" |
||||
|
) |
||||
|
|
||||
|
// lruNode is an element of the lru recency list. Nodes form a circular
// doubly-linked list anchored at lru.recent; a node's address is also stored
// in its cache Node's CacheData field (as unsafe.Pointer) so the LRU state
// can be found from the cache entry.
type lruNode struct {
	n *Node   // the cache entry this node tracks
	h *Handle // handle pinning n while it is resident in the LRU
	// ban marks an entry that must never (re-)enter the recency list;
	// a banned node is not linked and holds no handle.
	ban bool

	next, prev *lruNode
}
||||
|
|
||||
|
func (n *lruNode) insert(at *lruNode) { |
||||
|
x := at.next |
||||
|
at.next = n |
||||
|
n.prev = at |
||||
|
n.next = x |
||||
|
x.prev = n |
||||
|
} |
||||
|
|
||||
|
func (n *lruNode) remove() { |
||||
|
if n.prev != nil { |
||||
|
n.prev.next = n.next |
||||
|
n.next.prev = n.prev |
||||
|
n.prev = nil |
||||
|
n.next = nil |
||||
|
} else { |
||||
|
panic("BUG: removing removed node") |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
// lru implements the Cacher interface with a least-recently-used eviction
// policy. The recency list is circular and anchored at recent: recent.next
// is the most recently used node, recent.prev the least recently used.
type lru struct {
	mu       sync.Mutex // guards all fields below
	capacity int        // maximum total size of resident entries
	used     int        // current total size of resident entries
	recent   lruNode    // sentinel of the circular recency list
}
||||
|
|
||||
|
func (r *lru) reset() { |
||||
|
r.recent.next = &r.recent |
||||
|
r.recent.prev = &r.recent |
||||
|
r.used = 0 |
||||
|
} |
||||
|
|
||||
|
func (r *lru) Capacity() int { |
||||
|
r.mu.Lock() |
||||
|
defer r.mu.Unlock() |
||||
|
return r.capacity |
||||
|
} |
||||
|
|
||||
|
// SetCapacity changes the cache capacity and evicts least-recently-used
// entries until usage fits. Handles of evicted entries are released only
// after r.mu is dropped, since Release may call back into user code.
func (r *lru) SetCapacity(capacity int) {
	var evicted []*lruNode

	r.mu.Lock()
	r.capacity = capacity
	for r.used > r.capacity {
		// recent.prev is the least recently used node.
		rn := r.recent.prev
		if rn == nil {
			panic("BUG: invalid LRU used or capacity counter")
		}
		rn.remove()
		rn.n.CacheData = nil
		r.used -= rn.n.Size()
		evicted = append(evicted, rn)
	}
	r.mu.Unlock()

	// Release outside the lock to avoid re-entrancy deadlocks.
	for _, rn := range evicted {
		rn.h.Release()
	}
}
||||
|
|
||||
|
// Promote marks n as most recently used. A node not yet tracked by the LRU
// is admitted (if it fits within capacity at all) and may push out older
// entries; a node already tracked is moved to the front unless it is banned.
// Evicted handles are released after r.mu is dropped.
func (r *lru) Promote(n *Node) {
	var evicted []*lruNode

	r.mu.Lock()
	if n.CacheData == nil {
		// Not tracked yet; only admit entries that can ever fit.
		if n.Size() <= r.capacity {
			rn := &lruNode{n: n, h: n.GetHandle()}
			rn.insert(&r.recent)
			n.CacheData = unsafe.Pointer(rn)
			r.used += n.Size()

			// Evict from the cold end until usage fits again.
			for r.used > r.capacity {
				rn := r.recent.prev
				if rn == nil {
					panic("BUG: invalid LRU used or capacity counter")
				}
				rn.remove()
				rn.n.CacheData = nil
				r.used -= rn.n.Size()
				evicted = append(evicted, rn)
			}
		}
	} else {
		// Already tracked: refresh recency unless banned.
		rn := (*lruNode)(n.CacheData)
		if !rn.ban {
			rn.remove()
			rn.insert(&r.recent)
		}
	}
	r.mu.Unlock()

	// Release outside the lock; Release may run user callbacks.
	for _, rn := range evicted {
		rn.h.Release()
	}
}
||||
|
|
||||
|
// Ban excludes n from the LRU permanently. An untracked node gets a banned
// placeholder so later Promote calls are no-ops; a tracked node is unlinked,
// its usage refunded, and its pinning handle released (outside the lock).
func (r *lru) Ban(n *Node) {
	r.mu.Lock()
	if n.CacheData == nil {
		// Never admitted: record the ban so Promote will skip it.
		n.CacheData = unsafe.Pointer(&lruNode{n: n, ban: true})
	} else {
		rn := (*lruNode)(n.CacheData)
		if !rn.ban {
			rn.remove()
			rn.ban = true
			r.used -= rn.n.Size()
			// Unlock before Release: it may call back into user code.
			r.mu.Unlock()

			rn.h.Release()
			rn.h = nil
			return
		}
	}
	r.mu.Unlock()
}
||||
|
|
||||
|
// Evict drops n's pin on the cache, releasing its handle. Banned or
// untracked nodes are ignored.
//
// NOTE(review): unlike SetCapacity/EvictNS, this neither unlinks rn from
// the recency list nor decrements r.used — the node stays linked with a
// stale CacheData association. Verify against upstream whether this is
// intentional or a missed rn.remove()/used adjustment.
func (r *lru) Evict(n *Node) {
	r.mu.Lock()
	rn := (*lruNode)(n.CacheData)
	if rn == nil || rn.ban {
		r.mu.Unlock()
		return
	}
	n.CacheData = nil
	r.mu.Unlock()

	// Release outside the lock.
	rn.h.Release()
}
||||
|
|
||||
|
// EvictNS evicts every tracked entry belonging to namespace ns. Handles are
// collected under the lock and released after it is dropped.
func (r *lru) EvictNS(ns uint64) {
	var evicted []*lruNode

	r.mu.Lock()
	// Walk from the cold end; capture e.prev before rn.remove() clears it.
	for e := r.recent.prev; e != &r.recent; {
		rn := e
		e = e.prev
		if rn.n.NS() == ns {
			rn.remove()
			rn.n.CacheData = nil
			r.used -= rn.n.Size()
			evicted = append(evicted, rn)
		}
	}
	r.mu.Unlock()

	for _, rn := range evicted {
		rn.h.Release()
	}
}
||||
|
|
||||
|
// EvictAll evicts every tracked entry. The detached chain starting at the
// old tail is walked a second time, after the lock is dropped, to release
// handles; this is safe because reset() only rewires the sentinel — the
// detached nodes keep their prev pointers.
func (r *lru) EvictAll() {
	r.mu.Lock()
	back := r.recent.prev
	for rn := back; rn != &r.recent; rn = rn.prev {
		rn.n.CacheData = nil
	}
	r.reset()
	r.mu.Unlock()

	for rn := back; rn != &r.recent; rn = rn.prev {
		rn.h.Release()
	}
}
||||
|
|
||||
|
// Close implements Cacher.Close. The LRU holds no resources beyond what
// eviction already manages, so this is a no-op.
func (r *lru) Close() error {
	return nil
}
||||
|
|
||||
|
// NewLRU creates a new Cacher that evicts entries with a
// least-recently-used policy, holding at most capacity total size.
func NewLRU(capacity int) Cacher {
	r := &lru{capacity: capacity}
	r.reset()
	return r
}
@ -0,0 +1,67 @@ |
|||||
|
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
|
||||
|
// All rights reserved.
|
||||
|
//
|
||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||
|
// found in the LICENSE file.
|
||||
|
|
||||
|
package leveldb |
||||
|
|
||||
|
import ( |
||||
|
"github.com/syndtr/goleveldb/leveldb/comparer" |
||||
|
) |
||||
|
|
||||
|
// iComparer orders internal keys (user key + 8-byte sequence/type trailer)
// by wrapping a user-supplied comparer for the user-key portion.
type iComparer struct {
	ucmp comparer.Comparer // user-key comparer
}
||||
|
|
||||
|
// uName returns the underlying user comparer's name.
func (icmp *iComparer) uName() string {
	return icmp.ucmp.Name()
}

// uCompare compares two user keys with the underlying comparer.
func (icmp *iComparer) uCompare(a, b []byte) int {
	return icmp.ucmp.Compare(a, b)
}

// uSeparator delegates separator computation on user keys.
func (icmp *iComparer) uSeparator(dst, a, b []byte) []byte {
	return icmp.ucmp.Separator(dst, a, b)
}

// uSuccessor delegates successor computation on user keys.
func (icmp *iComparer) uSuccessor(dst, b []byte) []byte {
	return icmp.ucmp.Successor(dst, b)
}
||||
|
|
||||
|
// Name reports the user comparer's name; the internal-key wrapping does not
// change the on-disk comparer identity.
func (icmp *iComparer) Name() string {
	return icmp.uName()
}
||||
|
|
||||
|
// Compare orders internal keys: ascending by user key, and for equal user
// keys descending by the 8-byte sequence/type trailer (num), so that newer
// entries sort first.
func (icmp *iComparer) Compare(a, b []byte) int {
	x := icmp.uCompare(internalKey(a).ukey(), internalKey(b).ukey())
	if x == 0 {
		// Larger num (newer sequence) sorts earlier.
		if m, n := internalKey(a).num(), internalKey(b).num(); m > n {
			return -1
		} else if m < n {
			return 1
		}
	}
	return x
}
||||
|
|
||||
|
// Separator appends to dst an internal key x with a <= x < b, built from the
// user-key separator plus the maximum possible trailer. Returns nil when the
// user comparer could not shorten (signalling the caller to keep a as-is).
func (icmp *iComparer) Separator(dst, a, b []byte) []byte {
	ua, ub := internalKey(a).ukey(), internalKey(b).ukey()
	dst = icmp.uSeparator(dst, ua, ub)
	if dst != nil && len(dst) < len(ua) && icmp.uCompare(ua, dst) < 0 {
		// Append earliest possible number.
		return append(dst, keyMaxNumBytes...)
	}
	// nil means "no shorter separator"; intentional, callers fall back to a.
	return nil
}
||||
|
|
||||
|
// Successor appends to dst an internal key x with x >= b, built from the
// user-key successor plus the maximum possible trailer. Returns nil when the
// user comparer could not shorten (caller keeps b as-is).
func (icmp *iComparer) Successor(dst, b []byte) []byte {
	ub := internalKey(b).ukey()
	dst = icmp.uSuccessor(dst, ub)
	if dst != nil && len(dst) < len(ub) && icmp.uCompare(ub, dst) < 0 {
		// Append earliest possible number.
		return append(dst, keyMaxNumBytes...)
	}
	// nil means "no shorter successor"; intentional.
	return nil
}
@ -0,0 +1,51 @@ |
|||||
|
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
|
||||
|
// All rights reserved.
|
||||
|
//
|
||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||
|
// found in the LICENSE file.
|
||||
|
|
||||
|
package comparer |
||||
|
|
||||
|
import "bytes" |
||||
|
|
||||
|
// bytesComparer orders byte slices lexicographically, matching
// bytes.Compare.
type bytesComparer struct{}

// Compare returns -1, 0 or +1 per natural byte-wise ordering.
func (bytesComparer) Compare(a, b []byte) int {
	return bytes.Compare(a, b)
}

// Name identifies this comparer in the on-disk format.
func (bytesComparer) Name() string {
	return "leveldb.BytewiseComparator"
}

// Separator appends to dst a key x with a <= x < b that is shorter than a
// when possible, or returns nil when a cannot be shortened (e.g. one key is
// a prefix of the other, or the diverging bytes are adjacent).
func (bytesComparer) Separator(dst, a, b []byte) []byte {
	limit := len(a)
	if len(b) < limit {
		limit = len(b)
	}
	shared := 0
	for shared < limit && a[shared] == b[shared] {
		shared++
	}
	if shared < limit {
		// Bump the first diverging byte if there is room below b.
		if c := a[shared]; c < 0xff && c+1 < b[shared] {
			dst = append(dst, a[:shared+1]...)
			dst[shared]++
			return dst
		}
	}
	// Do not shorten if one string is a prefix of the other.
	return nil
}

// Successor appends to dst a key x with x >= b that is shorter than b when
// possible, or returns nil when b consists entirely of 0xff bytes.
func (bytesComparer) Successor(dst, b []byte) []byte {
	for i := 0; i < len(b); i++ {
		if b[i] != 0xff {
			dst = append(dst, b[:i+1]...)
			dst[i]++
			return dst
		}
	}
	return nil
}

// DefaultComparer are default implementation of the Comparer interface.
// It uses the natural ordering, consistent with bytes.Compare.
var DefaultComparer = bytesComparer{}
@ -0,0 +1,57 @@ |
|||||
|
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
|
||||
|
// All rights reserved.
|
||||
|
//
|
||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||
|
// found in the LICENSE file.
|
||||
|
|
||||
|
// Package comparer provides interface and implementation for ordering
|
||||
|
// sets of data.
|
||||
|
package comparer |
||||
|
|
||||
|
// BasicComparer is the interface that wraps the basic Compare method.
type BasicComparer interface {
	// Compare returns -1, 0, or +1 depending on whether a is 'less than',
	// 'equal to' or 'greater than' b. The two arguments can only be 'equal'
	// if their contents are exactly equal. Furthermore, the empty slice
	// must be 'less than' any non-empty slice.
	Compare(a, b []byte) int
}
||||
|
|
||||
|
// Comparer defines a total ordering over the space of []byte keys: a 'less
// than' relationship.
type Comparer interface {
	BasicComparer

	// Name returns name of the comparer.
	//
	// The Level-DB on-disk format stores the comparer name, and opening a
	// database with a different comparer from the one it was created with
	// will result in an error.
	//
	// An implementation should switch to a new name whenever the comparer
	// implementation changes in a way that will cause the relative ordering
	// of any two keys to change.
	//
	// Names starting with "leveldb." are reserved and should not be used
	// by any users of this package.
	Name() string

	// Below are advanced functions used to reduce the space requirements
	// for internal data structures such as index blocks.

	// Separator appends a sequence of bytes x to dst such that a <= x && x < b,
	// where 'less than' is consistent with Compare. An implementation should
	// return nil if x equal to a.
	//
	// Either contents of a or b should not be modified by any means. Doing so
	// may cause corruption on the internal state.
	Separator(dst, a, b []byte) []byte

	// Successor appends a sequence of bytes x to dst such that x >= b, where
	// 'less than' is consistent with Compare. An implementation should return
	// nil if x equal to b.
	//
	// Contents of b should not be modified by any means. Doing so may cause
	// corruption on the internal state.
	Successor(dst, b []byte) []byte
}
@ -0,0 +1,496 @@ |
|||||
|
// Copyright (c) 2013, Suryandaru Triandana <syndtr@gmail.com>
|
||||
|
// All rights reserved.
|
||||
|
//
|
||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||
|
// found in the LICENSE file.
|
||||
|
|
||||
|
package leveldb |
||||
|
|
||||
|
import ( |
||||
|
"bytes" |
||||
|
"fmt" |
||||
|
"io" |
||||
|
"math/rand" |
||||
|
"testing" |
||||
|
|
||||
|
"github.com/syndtr/goleveldb/leveldb/filter" |
||||
|
"github.com/syndtr/goleveldb/leveldb/opt" |
||||
|
"github.com/syndtr/goleveldb/leveldb/storage" |
||||
|
) |
||||
|
|
||||
|
const ctValSize = 1000 |
||||
|
|
||||
|
// dbCorruptHarness extends dbHarness with helpers for deliberately
// corrupting, removing and recovering database files.
type dbCorruptHarness struct {
	dbHarness
}
||||
|
|
||||
|
// newDbCorruptHarnessWopt creates a corruption harness using the given
// options.
func newDbCorruptHarnessWopt(t *testing.T, o *opt.Options) *dbCorruptHarness {
	h := new(dbCorruptHarness)
	h.init(t, o)
	return h
}
||||
|
|
||||
|
// newDbCorruptHarness creates a corruption harness with a small block cache
// and strict journal checksum verification, so injected corruption is
// reliably detected.
func newDbCorruptHarness(t *testing.T) *dbCorruptHarness {
	return newDbCorruptHarnessWopt(t, &opt.Options{
		BlockCacheCapacity: 100,
		Strict:             opt.StrictJournalChecksum,
	})
}
||||
|
|
||||
|
// recover reopens the database via Recover (manifest reconstruction),
// failing the test on error. The recovered DB replaces h.db.
func (h *dbCorruptHarness) recover() {
	p := &h.dbHarness
	t := p.t

	var err error
	p.db, err = Recover(h.stor, h.o)
	if err != nil {
		t.Fatal("Repair: got error: ", err)
	}
}
||||
|
|
||||
|
func (h *dbCorruptHarness) build(n int) { |
||||
|
p := &h.dbHarness |
||||
|
t := p.t |
||||
|
db := p.db |
||||
|
|
||||
|
batch := new(Batch) |
||||
|
for i := 0; i < n; i++ { |
||||
|
batch.Reset() |
||||
|
batch.Put(tkey(i), tval(i, ctValSize)) |
||||
|
err := db.Write(batch, p.wo) |
||||
|
if err != nil { |
||||
|
t.Fatal("write error: ", err) |
||||
|
} |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func (h *dbCorruptHarness) buildShuffled(n int, rnd *rand.Rand) { |
||||
|
p := &h.dbHarness |
||||
|
t := p.t |
||||
|
db := p.db |
||||
|
|
||||
|
batch := new(Batch) |
||||
|
for i := range rnd.Perm(n) { |
||||
|
batch.Reset() |
||||
|
batch.Put(tkey(i), tval(i, ctValSize)) |
||||
|
err := db.Write(batch, p.wo) |
||||
|
if err != nil { |
||||
|
t.Fatal("write error: ", err) |
||||
|
} |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func (h *dbCorruptHarness) deleteRand(n, max int, rnd *rand.Rand) { |
||||
|
p := &h.dbHarness |
||||
|
t := p.t |
||||
|
db := p.db |
||||
|
|
||||
|
batch := new(Batch) |
||||
|
for i := 0; i < n; i++ { |
||||
|
batch.Reset() |
||||
|
batch.Delete(tkey(rnd.Intn(max))) |
||||
|
err := db.Write(batch, p.wo) |
||||
|
if err != nil { |
||||
|
t.Fatal("write error: ", err) |
||||
|
} |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
// corrupt flips the high bit of n bytes starting at offset in the fi-th
// file (sorted order) of type ft. fi == -1 selects the last file; a
// negative offset counts back from the end of the file. The file is read
// whole, mutated in memory, then removed and rewritten.
func (h *dbCorruptHarness) corrupt(ft storage.FileType, fi, offset, n int) {
	p := &h.dbHarness
	t := p.t

	fds, _ := p.stor.List(ft)
	sortFds(fds)
	if fi < 0 {
		// -1 means "the newest file of this type".
		fi = len(fds) - 1
	}
	if fi >= len(fds) {
		t.Fatalf("no such file with type %q with index %d", ft, fi)
	}

	fd := fds[fi]
	r, err := h.stor.Open(fd)
	if err != nil {
		t.Fatal("cannot open file: ", err)
	}
	// Seek to end to learn the file size, then rewind.
	x, err := r.Seek(0, 2)
	if err != nil {
		t.Fatal("cannot query file size: ", err)
	}
	m := int(x)
	if _, err := r.Seek(0, 0); err != nil {
		t.Fatal(err)
	}

	// Normalize offset: negative counts from EOF; clamp range to the file.
	if offset < 0 {
		if -offset > m {
			offset = 0
		} else {
			offset = m + offset
		}
	}
	if offset > m {
		offset = m
	}
	if offset+n > m {
		n = m - offset
	}

	buf := make([]byte, m)
	_, err = io.ReadFull(r, buf)
	if err != nil {
		t.Fatal("cannot read file: ", err)
	}
	r.Close()

	// Flip the top bit of each targeted byte.
	for i := 0; i < n; i++ {
		buf[offset+i] ^= 0x80
	}

	// Replace the file with the corrupted copy.
	err = h.stor.Remove(fd)
	if err != nil {
		t.Fatal("cannot remove old file: ", err)
	}
	w, err := h.stor.Create(fd)
	if err != nil {
		t.Fatal("cannot create new file: ", err)
	}
	_, err = w.Write(buf)
	if err != nil {
		t.Fatal("cannot write new file: ", err)
	}
	w.Close()
}
||||
|
|
||||
|
// removeAll removes every file of type ft via the normal Remove path.
func (h *dbCorruptHarness) removeAll(ft storage.FileType) {
	fds, err := h.stor.List(ft)
	if err != nil {
		h.t.Fatal("get files: ", err)
	}
	for _, fd := range fds {
		if err := h.stor.Remove(fd); err != nil {
			h.t.Error("remove file: ", err)
		}
	}
}
||||
|
|
||||
|
// forceRemoveAll removes every file of type ft via ForceRemove, bypassing
// any in-use protection the storage layer applies to Remove.
func (h *dbCorruptHarness) forceRemoveAll(ft storage.FileType) {
	fds, err := h.stor.List(ft)
	if err != nil {
		h.t.Fatal("get files: ", err)
	}
	for _, fd := range fds {
		if err := h.stor.ForceRemove(fd); err != nil {
			h.t.Error("remove file: ", err)
		}
	}
}
||||
|
|
||||
|
func (h *dbCorruptHarness) removeOne(ft storage.FileType) { |
||||
|
fds, err := h.stor.List(ft) |
||||
|
if err != nil { |
||||
|
h.t.Fatal("get files: ", err) |
||||
|
} |
||||
|
fd := fds[rand.Intn(len(fds))] |
||||
|
h.t.Logf("removing file @%d", fd.Num) |
||||
|
if err := h.stor.Remove(fd); err != nil { |
||||
|
h.t.Error("remove file: ", err) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
// check iterates the whole database, classifying each entry against the
// tkey/tval scheme, and fails unless the count of intact entries falls in
// [min, max]. It tallies out-of-order keys (badk), wrong values (badv) and
// gaps in the key sequence (missed).
func (h *dbCorruptHarness) check(min, max int) {
	p := &h.dbHarness
	t := p.t
	db := p.db

	var n, badk, badv, missed, good int
	iter := db.NewIterator(nil, p.ro)
	for iter.Next() {
		k := 0
		fmt.Sscanf(string(iter.Key()), "%d", &k)
		if k < n {
			// Key went backwards relative to the expected sequence.
			badk++
			continue
		}
		// Any skipped key numbers count as missed entries.
		missed += k - n
		n = k + 1
		if !bytes.Equal(iter.Value(), tval(k, ctValSize)) {
			badv++
		} else {
			good++
		}
	}
	err := iter.Error()
	iter.Release()
	t.Logf("want=%d..%d got=%d badkeys=%d badvalues=%d missed=%d, err=%v",
		min, max, good, badk, badv, missed, err)
	if good < min || good > max {
		t.Errorf("good entries number not in range")
	}
}
||||
|
|
||||
|
// TestCorruptDB_Journal corrupts two spots of the journal and expects
// recovery to keep only the records before the first corruption.
func TestCorruptDB_Journal(t *testing.T) {
	h := newDbCorruptHarness(t)
	defer h.close()

	h.build(100)
	h.check(100, 100)
	h.closeDB()
	h.corrupt(storage.TypeJournal, -1, 19, 1)
	h.corrupt(storage.TypeJournal, -1, 32*1024+1000, 1)

	h.openDB()
	h.check(36, 36)
}
||||
|
|
||||
|
// TestCorruptDB_Table flips one byte in a fully-compacted table and expects
// exactly one entry to be lost.
func TestCorruptDB_Table(t *testing.T) {
	h := newDbCorruptHarness(t)
	defer h.close()

	h.build(100)
	h.compactMem()
	h.compactRangeAt(0, "", "")
	h.compactRangeAt(1, "", "")
	h.closeDB()
	h.corrupt(storage.TypeTable, -1, 100, 1)

	h.openDB()
	h.check(99, 99)
}
||||
|
|
||||
|
// TestCorruptDB_TableIndex corrupts the table's index region (near EOF) and
// expects a large but partial fraction of entries to survive.
func TestCorruptDB_TableIndex(t *testing.T) {
	h := newDbCorruptHarness(t)
	defer h.close()

	h.build(10000)
	h.compactMem()
	h.closeDB()
	h.corrupt(storage.TypeTable, -1, -2000, 500)

	h.openDB()
	h.check(5000, 9999)
}
||||
|
|
||||
|
// TestCorruptDB_MissingManifest removes the manifest and verifies a normal
// open fails while Recover rebuilds a fully usable database — twice.
func TestCorruptDB_MissingManifest(t *testing.T) {
	rnd := rand.New(rand.NewSource(0x0badda7a))
	h := newDbCorruptHarnessWopt(t, &opt.Options{
		BlockCacheCapacity: 100,
		Strict:             opt.StrictJournalChecksum,
		WriteBuffer:        1000 * 60,
	})
	defer h.close()

	// Churn the keyspace across several memtable flushes.
	h.build(1000)
	h.compactMem()
	h.buildShuffled(1000, rnd)
	h.compactMem()
	h.deleteRand(500, 1000, rnd)
	h.compactMem()
	h.buildShuffled(1000, rnd)
	h.compactMem()
	h.deleteRand(500, 1000, rnd)
	h.compactMem()
	h.buildShuffled(1000, rnd)
	h.compactMem()
	h.closeDB()

	h.forceRemoveAll(storage.TypeManifest)
	h.openAssert(false)

	h.recover()
	h.check(1000, 1000)
	h.build(1000)
	h.compactMem()
	h.compactRange("", "")
	h.closeDB()

	h.recover()
	h.check(1000, 1000)
}
||||
|
|
||||
|
// TestCorruptDB_SequenceNumberRecovery verifies Recover preserves the
// sequence number, so a post-recovery write supersedes all earlier versions
// and survives a reopen.
func TestCorruptDB_SequenceNumberRecovery(t *testing.T) {
	h := newDbCorruptHarness(t)
	defer h.close()

	h.put("foo", "v1")
	h.put("foo", "v2")
	h.put("foo", "v3")
	h.put("foo", "v4")
	h.put("foo", "v5")
	h.closeDB()

	h.recover()
	h.getVal("foo", "v5")
	h.put("foo", "v6")
	h.getVal("foo", "v6")

	h.reopenDB()
	h.getVal("foo", "v6")
}
||||
|
|
||||
|
// TestCorruptDB_SequenceNumberRecoveryTable is the sequence-number recovery
// test with the versions flushed into tables rather than the journal.
func TestCorruptDB_SequenceNumberRecoveryTable(t *testing.T) {
	h := newDbCorruptHarness(t)
	defer h.close()

	h.put("foo", "v1")
	h.put("foo", "v2")
	h.put("foo", "v3")
	h.compactMem()
	h.put("foo", "v4")
	h.put("foo", "v5")
	h.compactMem()
	h.closeDB()

	h.recover()
	h.getVal("foo", "v5")
	h.put("foo", "v6")
	h.getVal("foo", "v6")

	h.reopenDB()
	h.getVal("foo", "v6")
}
||||
|
|
||||
|
// TestCorruptDB_CorruptedManifest corrupts the manifest and verifies open
// fails while Recover restores the data.
func TestCorruptDB_CorruptedManifest(t *testing.T) {
	h := newDbCorruptHarness(t)
	defer h.close()

	h.put("foo", "hello")
	h.compactMem()
	h.compactRange("", "")
	h.closeDB()
	h.corrupt(storage.TypeManifest, -1, 0, 1000)
	h.openAssert(false)

	h.recover()
	h.getVal("foo", "hello")
}
||||
|
|
||||
|
// TestCorruptDB_CompactionInputError verifies a corrupted compaction input
// table loses one entry but does not block further writes.
func TestCorruptDB_CompactionInputError(t *testing.T) {
	h := newDbCorruptHarness(t)
	defer h.close()

	h.build(10)
	h.compactMem()
	h.closeDB()
	h.corrupt(storage.TypeTable, -1, 100, 1)

	h.openDB()
	h.check(9, 9)

	// New writes must still succeed after the corruption.
	h.build(10000)
	h.check(10000, 10000)
}
||||
|
|
||||
|
// TestCorruptDB_UnrelatedKeys verifies that corruption in one table does
// not affect reads of a newly written, unrelated key — both from the
// memtable and after flushing it.
func TestCorruptDB_UnrelatedKeys(t *testing.T) {
	h := newDbCorruptHarness(t)
	defer h.close()

	h.build(10)
	h.compactMem()
	h.closeDB()
	h.corrupt(storage.TypeTable, -1, 100, 1)

	h.openDB()
	h.put(string(tkey(1000)), string(tval(1000, ctValSize)))
	h.getVal(string(tkey(1000)), string(tval(1000, ctValSize)))
	h.compactMem()
	h.getVal(string(tkey(1000)), string(tval(1000, ctValSize)))
}
||||
|
|
||||
|
// TestCorruptDB_Level0NewerFileHasOlderSeqnum verifies recovery returns the
// newest value per key even when a newer level-0 file contains older
// sequence numbers for other keys.
func TestCorruptDB_Level0NewerFileHasOlderSeqnum(t *testing.T) {
	h := newDbCorruptHarness(t)
	defer h.close()

	h.put("a", "v1")
	h.put("b", "v1")
	h.compactMem()
	h.put("a", "v2")
	h.put("b", "v2")
	h.compactMem()
	h.put("a", "v3")
	h.put("b", "v3")
	h.compactMem()
	h.put("c", "v0")
	h.put("d", "v0")
	h.compactMem()
	h.compactRangeAt(1, "", "")
	h.closeDB()

	h.recover()
	h.getVal("a", "v3")
	h.getVal("b", "v3")
	h.getVal("c", "v0")
	h.getVal("d", "v0")
}
||||
|
|
||||
|
// TestCorruptDB_RecoverInvalidSeq_Issue53 is the regression test for
// goleveldb issue #53: like the Level0 test above but compacting level 0,
// recovery must still surface the latest version of each key.
func TestCorruptDB_RecoverInvalidSeq_Issue53(t *testing.T) {
	h := newDbCorruptHarness(t)
	defer h.close()

	h.put("a", "v1")
	h.put("b", "v1")
	h.compactMem()
	h.put("a", "v2")
	h.put("b", "v2")
	h.compactMem()
	h.put("a", "v3")
	h.put("b", "v3")
	h.compactMem()
	h.put("c", "v0")
	h.put("d", "v0")
	h.compactMem()
	h.compactRangeAt(0, "", "")
	h.closeDB()

	h.recover()
	h.getVal("a", "v3")
	h.getVal("b", "v3")
	h.getVal("c", "v0")
	h.getVal("d", "v0")
}
||||
|
|
||||
|
// TestCorruptDB_MissingTableFiles verifies that opening fails when a table
// file referenced by the manifest has been removed.
func TestCorruptDB_MissingTableFiles(t *testing.T) {
	h := newDbCorruptHarness(t)
	defer h.close()

	h.put("a", "v1")
	h.put("b", "v1")
	h.compactMem()
	h.put("c", "v2")
	h.put("d", "v2")
	h.compactMem()
	h.put("e", "v3")
	h.put("f", "v3")
	h.closeDB()

	h.removeOne(storage.TypeTable)
	h.openAssert(false)
}
||||
|
|
||||
|
// TestCorruptDB_RecoverTable corrupts several tables (data, filter, and one
// arbitrary spot) and verifies Recover keeps the sequence number and most
// entries; a corrupted bloom filter must not reduce recovered data.
func TestCorruptDB_RecoverTable(t *testing.T) {
	h := newDbCorruptHarnessWopt(t, &opt.Options{
		WriteBuffer:         112 * opt.KiB,
		CompactionTableSize: 90 * opt.KiB,
		Filter:              filter.NewBloomFilter(10),
	})
	defer h.close()

	h.build(1000)
	h.compactMem()
	h.compactRangeAt(0, "", "")
	h.compactRangeAt(1, "", "")
	seq := h.db.seq
	h.closeDB()
	h.corrupt(storage.TypeTable, 0, 1000, 1)
	h.corrupt(storage.TypeTable, 3, 10000, 1)
	// Corrupted filter shouldn't affect recovery.
	h.corrupt(storage.TypeTable, 3, 113888, 10)
	h.corrupt(storage.TypeTable, -1, 20000, 1)

	h.recover()
	if h.db.seq != seq {
		t.Errorf("invalid seq, want=%d got=%d", seq, h.db.seq)
	}
	h.check(985, 985)
}
1091
vendor/src/github.com/syndtr/goleveldb/leveldb/db.go
File diff suppressed because it is too large
View File
File diff suppressed because it is too large
View File
@ -0,0 +1,826 @@ |
|||||
|
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
|
||||
|
// All rights reserved.
|
||||
|
//
|
||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||
|
// found in the LICENSE file.
|
||||
|
|
||||
|
package leveldb |
||||
|
|
||||
|
import ( |
||||
|
"sync" |
||||
|
"time" |
||||
|
|
||||
|
"github.com/syndtr/goleveldb/leveldb/errors" |
||||
|
"github.com/syndtr/goleveldb/leveldb/opt" |
||||
|
"github.com/syndtr/goleveldb/leveldb/storage" |
||||
|
) |
||||
|
|
||||
|
var ( |
||||
|
errCompactionTransactExiting = errors.New("leveldb: compaction transact exiting") |
||||
|
) |
||||
|
|
||||
|
// cStat accumulates per-level compaction statistics: total time spent and
// bytes read/written.
type cStat struct {
	duration time.Duration
	read     int64
	write    int64
}

// add folds a completed staging record into the accumulated totals.
func (p *cStat) add(n *cStatStaging) {
	p.duration += n.duration
	p.read += n.read
	p.write += n.write
}

// get returns the accumulated totals.
func (p *cStat) get() (duration time.Duration, read, write int64) {
	return p.duration, p.read, p.write
}
||||
|
|
||||
|
// cStatStaging collects statistics for one in-flight compaction; its timer
// can be paused and resumed around untimed phases.
type cStatStaging struct {
	start    time.Time     // when the current timed interval began
	duration time.Duration // accumulated timed duration
	on       bool          // whether the timer is currently running
	read     int64
	write    int64
}

// startTimer begins (or resumes) timing; a no-op if already running.
func (p *cStatStaging) startTimer() {
	if !p.on {
		p.start = time.Now()
		p.on = true
	}
}

// stopTimer pauses timing, folding the elapsed interval into duration;
// a no-op if not running.
func (p *cStatStaging) stopTimer() {
	if p.on {
		p.duration += time.Since(p.start)
		p.on = false
	}
}
||||
|
|
||||
|
// cStats holds one cStat per level, growing on demand, guarded by lk.
type cStats struct {
	lk    sync.Mutex
	stats []cStat
}

// addStat folds n into the stats for level, growing the slice if this level
// has not been seen before.
func (p *cStats) addStat(level int, n *cStatStaging) {
	p.lk.Lock()
	if level >= len(p.stats) {
		newStats := make([]cStat, level+1)
		copy(newStats, p.stats)
		p.stats = newStats
	}
	p.stats[level].add(n)
	p.lk.Unlock()
}

// getStat returns the accumulated stats for level; zero values if the level
// has never recorded anything.
func (p *cStats) getStat(level int) (duration time.Duration, read, write int64) {
	p.lk.Lock()
	defer p.lk.Unlock()
	if level < len(p.stats) {
		return p.stats[level].get()
	}
	return
}
||||
|
|
||||
|
// compactionError is a goroutine-resident state machine distributing the
// current compaction error status. States: no error, transient error
// (served on compErrC, can clear back to no-error), and persistent error
// (read-only mode / corruption: additionally served on compPerErrC and the
// DB write lock is seized so writes stop passing through). Exits on closeC.
func (db *DB) compactionError() {
	var err error
noerr:
	// No error.
	for {
		select {
		case err = <-db.compErrSetC:
			switch {
			case err == nil:
			case err == ErrReadOnly, errors.IsCorrupted(err):
				goto hasperr
			default:
				goto haserr
			}
		case <-db.closeC:
			return
		}
	}
haserr:
	// Transient error.
	for {
		select {
		case db.compErrC <- err:
		case err = <-db.compErrSetC:
			switch {
			case err == nil:
				goto noerr
			case err == ErrReadOnly, errors.IsCorrupted(err):
				goto hasperr
			default:
			}
		case <-db.closeC:
			return
		}
	}
hasperr:
	// Persistent error.
	for {
		select {
		case db.compErrC <- err:
		case db.compPerErrC <- err:
		case db.writeLockC <- struct{}{}:
			// Hold write lock, so that write won't pass-through.
			db.compWriteLocking = true
		case <-db.closeC:
			if db.compWriteLocking {
				// We should release the lock or Close will hang.
				<-db.writeLockC
			}
			return
		}
	}
}
||||
|
|
||||
|
// compactionTransactCounter counts forward progress inside one transact
// attempt; compactionTransact uses it to decide whether to reset backoff.
type compactionTransactCounter int

// incr advances the progress counter by one.
func (cnt *compactionTransactCounter) incr() {
	*cnt++
}
||||
|
|
||||
|
// compactionTransactInterface is the unit of retried compaction work:
// run executes one attempt (reporting progress via cnt), revert undoes
// partial effects when the transact is abandoned.
type compactionTransactInterface interface {
	run(cnt *compactionTransactCounter) error
	revert() error
}
||||
|
|
||||
|
// compactionTransact runs t until it succeeds, retrying with backoff on
// transient errors. It never returns on permanent failure: DB close,
// corruption, or an outstanding persistent error make it panic with
// errCompactionTransactExiting (via compactionExitTransact), in which case
// the deferred handler invokes t.revert() before re-panicking.
func (db *DB) compactionTransact(name string, t compactionTransactInterface) {
	defer func() {
		if x := recover(); x != nil {
			if x == errCompactionTransactExiting {
				// Only our own exit panic triggers revert.
				if err := t.revert(); err != nil {
					db.logf("%s revert error %q", name, err)
				}
			}
			panic(x)
		}
	}()

	const (
		backoffMin = 1 * time.Second
		backoffMax = 8 * time.Second
		backoffMul = 2 * time.Second
	)
	var (
		backoff  = backoffMin
		backoffT = time.NewTimer(backoff)
		lastCnt  = compactionTransactCounter(0)

		disableBackoff = db.s.o.GetDisableCompactionBackoff()
	)
	for n := 0; ; n++ {
		// Check whether the DB is closed.
		if db.isClosed() {
			db.logf("%s exiting", name)
			db.compactionExitTransact()
		} else if n > 0 {
			db.logf("%s retrying N·%d", name, n)
		}

		// Execute.
		cnt := compactionTransactCounter(0)
		err := t.run(&cnt)
		if err != nil {
			db.logf("%s error I·%d %q", name, cnt, err)
		}

		// Set compaction error status.
		select {
		case db.compErrSetC <- err:
		case perr := <-db.compPerErrC:
			if err != nil {
				db.logf("%s exiting (persistent error %q)", name, perr)
				db.compactionExitTransact()
			}
		case <-db.closeC:
			db.logf("%s exiting", name)
			db.compactionExitTransact()
		}
		if err == nil {
			return
		}
		if errors.IsCorrupted(err) {
			db.logf("%s exiting (corruption detected)", name)
			db.compactionExitTransact()
		}

		if !disableBackoff {
			// Reset backoff duration if counter is advancing.
			if cnt > lastCnt {
				backoff = backoffMin
				lastCnt = cnt
			}

			// Backoff.
			// NOTE(review): backoff *= backoffMul multiplies two Durations
			// (nanosecond counts), overshooting hugely; the clamp below
			// means backoff effectively jumps straight to backoffMax after
			// the first retry. Confirm against upstream whether a plain
			// factor (backoff *= 2) was intended.
			backoffT.Reset(backoff)
			if backoff < backoffMax {
				backoff *= backoffMul
				if backoff > backoffMax {
					backoff = backoffMax
				}
			}
			select {
			case <-backoffT.C:
			case <-db.closeC:
				db.logf("%s exiting", name)
				db.compactionExitTransact()
			}
		}
	}
}
||||
|
|
||||
|
// compactionTransactFunc adapts a pair of plain functions to
// compactionTransactInterface; revertFunc may be nil for work with nothing
// to undo.
type compactionTransactFunc struct {
	runFunc    func(cnt *compactionTransactCounter) error
	revertFunc func() error
}

// run executes one attempt via the wrapped function.
func (t *compactionTransactFunc) run(cnt *compactionTransactCounter) error {
	return t.runFunc(cnt)
}

// revert undoes partial work, or is a no-op when no revert was supplied.
func (t *compactionTransactFunc) revert() error {
	if t.revertFunc != nil {
		return t.revertFunc()
	}
	return nil
}
||||
|
|
||||
|
// compactionTransactFunc is the function-based convenience wrapper around
// compactionTransact; revert may be nil.
func (db *DB) compactionTransactFunc(name string, run func(cnt *compactionTransactCounter) error, revert func() error) {
	db.compactionTransact(name, &compactionTransactFunc{run, revert})
}
||||
|
|
||||
|
// compactionExitTransact aborts the enclosing compactionTransact by
// panicking with the sentinel value its deferred handler recognizes.
func (db *DB) compactionExitTransact() {
	panic(errCompactionTransactExiting)
}
||||
|
|
||||
|
// compactionCommit commits a session record under compCommitLk so that
// version commits are serialized, retrying via the transact machinery.
func (db *DB) compactionCommit(name string, rec *sessionRecord) {
	db.compCommitLk.Lock()
	defer db.compCommitLk.Unlock() // Defer is necessary.
	db.compactionTransactFunc(name+"@commit", func(cnt *compactionTransactCounter) error {
		return db.s.commit(rec)
	}, nil)
}
||||
|
|
||||
|
// memCompaction flushes the frozen memdb into new table files, commits
// the result to the manifest, and then drops the frozen memdb. Table
// compaction is paused for the duration of the flush and resumed (and
// re-triggered) afterwards. Runs on the mCompaction goroutine.
func (db *DB) memCompaction() {
	mdb := db.getFrozenMem()
	if mdb == nil {
		// Nothing is frozen; nothing to flush.
		return
	}
	defer mdb.decref()

	db.logf("memdb@flush N·%d S·%s", mdb.Len(), shortenb(mdb.Size()))

	// Don't compact empty memdb.
	if mdb.Len() == 0 {
		db.logf("memdb@flush skipping")
		// drop frozen memdb
		db.dropFrozenMem()
		return
	}

	// Pause table compaction.
	resumeC := make(chan struct{})
	select {
	case db.tcompPauseC <- (chan<- struct{})(resumeC):
	case <-db.compPerErrC:
		// Table compaction hit a persistent error; don't wait for it to
		// accept the pause request.
		close(resumeC)
		resumeC = nil
	case <-db.closeC:
		return
	}

	var (
		rec        = &sessionRecord{}
		stats      = &cStatStaging{}
		flushLevel int
	)

	// Generate tables.
	db.compactionTransactFunc("memdb@flush", func(cnt *compactionTransactCounter) (err error) {
		stats.startTimer()
		flushLevel, err = db.s.flushMemdb(rec, mdb.DB, db.memdbMaxLevel)
		stats.stopTimer()
		return
	}, func() error {
		// Revert: delete any table files created by a failed attempt.
		for _, r := range rec.addedTables {
			db.logf("memdb@flush revert @%d", r.num)
			if err := db.s.stor.Remove(storage.FileDesc{Type: storage.TypeTable, Num: r.num}); err != nil {
				return err
			}
		}
		return nil
	})

	// Record the journal and sequence numbers that this flush makes durable.
	rec.setJournalNum(db.journalFd.Num)
	rec.setSeqNum(db.frozenSeq)

	// Commit.
	stats.startTimer()
	db.compactionCommit("memdb", rec)
	stats.stopTimer()

	db.logf("memdb@flush committed F·%d T·%v", len(rec.addedTables), stats.duration)

	for _, r := range rec.addedTables {
		stats.write += r.size
	}
	db.compStats.addStat(flushLevel, stats)

	// Drop frozen memdb.
	db.dropFrozenMem()

	// Resume table compaction.
	if resumeC != nil {
		select {
		case <-resumeC:
			close(resumeC)
		case <-db.closeC:
			return
		}
	}

	// Trigger table compaction.
	db.compTrigger(db.tcompCmdC)
}
||||
|
|
||||
|
// tableCompactionBuilder builds the output tables of a table compaction
// and implements the run/revert pair expected by compactionTransact.
// The snap* fields checkpoint iteration progress after each completed
// output table so a retried run can resume instead of starting over.
type tableCompactionBuilder struct {
	db  *DB
	s   *session
	c   *compaction
	rec *sessionRecord
	// stat1 accumulates write stats for the output level.
	// NOTE(review): stat0 appears unused within this file — confirm.
	stat0, stat1 *cStatStaging

	// Checkpoint of the iteration state, taken in run() right after an
	// output table is flushed.
	snapHasLastUkey bool
	snapLastUkey    []byte
	snapLastSeq     uint64
	snapIter        int
	snapKerrCnt     int
	snapDropCnt     int

	// Running counters: corrupted keys seen and entries dropped.
	kerrCnt int
	dropCnt int

	minSeq    uint64 // horizon below which shadowed entries may be dropped
	strict    bool   // fail on corrupted keys instead of keeping them
	tableSize int    // target size per output table

	tw *tWriter // current output table writer; nil when none is open
}

// appendKV writes one key/value pair into the current output table,
// opening a new table first if none is open. While between tables it
// also services pause and close requests, which makes the compaction
// pausable at table boundaries.
func (b *tableCompactionBuilder) appendKV(key, value []byte) error {
	// Create new table if not already.
	if b.tw == nil {
		// Check for pause event.
		if b.db != nil {
			select {
			case ch := <-b.db.tcompPauseC:
				b.db.pauseCompaction(ch)
			case <-b.db.closeC:
				// DB is closing: abort the whole transaction via panic.
				b.db.compactionExitTransact()
			default:
			}
		}

		// Create new table.
		var err error
		b.tw, err = b.s.tops.create()
		if err != nil {
			return err
		}
	}

	// Write key/value into table.
	return b.tw.append(key, value)
}
||||
|
|
||||
|
// needFlush reports whether the current output table has reached the
// configured target size and should be finalized.
func (b *tableCompactionBuilder) needFlush() bool {
	return b.tw.tw.BytesLen() >= b.tableSize
}

// flush finalizes the current output table, records it in the session
// record and write stats, and clears the writer so the next appendKV
// opens a fresh table.
func (b *tableCompactionBuilder) flush() error {
	t, err := b.tw.finish()
	if err != nil {
		return err
	}
	// Output tables always land one level below the compaction source.
	b.rec.addTableFile(b.c.sourceLevel+1, t)
	b.stat1.write += t.size
	b.s.logf("table@build created L%d@%d N·%d S·%s %q:%q", b.c.sourceLevel+1, t.fd.Num, b.tw.tw.EntriesLen(), shortenb(int(t.size)), t.imin, t.imax)
	b.tw = nil
	return nil
}

// cleanup drops any half-written output table; safe to call when no
// table is open.
func (b *tableCompactionBuilder) cleanup() {
	if b.tw != nil {
		b.tw.drop()
		b.tw = nil
	}
}
||||
|
|
||||
|
// run executes (or resumes) the table compaction: it walks the merged
// source iterator, drops entries shadowed by newer versions or by
// obsolete deletion markers, and writes the survivors into output
// tables. After each flushed table the iteration state is checkpointed
// into the snap* fields so a retry after an error resumes at that
// point. This is the run half of the compactionTransact contract.
func (b *tableCompactionBuilder) run(cnt *compactionTransactCounter) error {
	snapResumed := b.snapIter > 0
	hasLastUkey := b.snapHasLastUkey // The key might has zero length, so this is necessary.
	lastUkey := append([]byte{}, b.snapLastUkey...)
	lastSeq := b.snapLastSeq
	b.kerrCnt = b.snapKerrCnt
	b.dropCnt = b.snapDropCnt
	// Restore compaction state.
	b.c.restore()

	defer b.cleanup()

	b.stat1.startTimer()
	defer b.stat1.stopTimer()

	iter := b.c.newIterator()
	defer iter.Release()
	for i := 0; iter.Next(); i++ {
		// Incr transact counter.
		cnt.incr()

		// Skip until last state.
		if i < b.snapIter {
			continue
		}

		// resumed is true only for the first entry processed after a
		// resume; it suppresses shouldStopBefore for that one entry.
		resumed := false
		if snapResumed {
			resumed = true
			snapResumed = false
		}

		ikey := iter.Key()
		ukey, seq, kt, kerr := parseInternalKey(ikey)

		if kerr == nil {
			shouldStop := !resumed && b.c.shouldStopBefore(ikey)

			if !hasLastUkey || b.s.icmp.uCompare(lastUkey, ukey) != 0 {
				// First occurrence of this user key.

				// Only rotate tables if ukey doesn't hop across.
				if b.tw != nil && (shouldStop || b.needFlush()) {
					if err := b.flush(); err != nil {
						return err
					}

					// Creates snapshot of the state.
					b.c.save()
					b.snapHasLastUkey = hasLastUkey
					b.snapLastUkey = append(b.snapLastUkey[:0], lastUkey...)
					b.snapLastSeq = lastSeq
					b.snapIter = i
					b.snapKerrCnt = b.kerrCnt
					b.snapDropCnt = b.dropCnt
				}

				hasLastUkey = true
				lastUkey = append(lastUkey[:0], ukey...)
				lastSeq = keyMaxSeq
			}

			switch {
			case lastSeq <= b.minSeq:
				// Dropped because newer entry for same user key exist
				fallthrough // (A)
			case kt == keyTypeDel && seq <= b.minSeq && b.c.baseLevelForKey(lastUkey):
				// For this user key:
				// (1) there is no data in higher levels
				// (2) data in lower levels will have larger seq numbers
				// (3) data in layers that are being compacted here and have
				//     smaller seq numbers will be dropped in the next
				//     few iterations of this loop (by rule (A) above).
				// Therefore this deletion marker is obsolete and can be dropped.
				lastSeq = seq
				b.dropCnt++
				continue
			default:
				lastSeq = seq
			}
		} else {
			if b.strict {
				return kerr
			}

			// Don't drop corrupted keys.
			hasLastUkey = false
			lastUkey = lastUkey[:0]
			lastSeq = keyMaxSeq
			b.kerrCnt++
		}

		if err := b.appendKV(ikey, iter.Value()); err != nil {
			return err
		}
	}

	if err := iter.Error(); err != nil {
		return err
	}

	// Finish last table.
	if b.tw != nil && !b.tw.empty() {
		return b.flush()
	}
	return nil
}
||||
|
|
||||
|
func (b *tableCompactionBuilder) revert() error { |
||||
|
for _, at := range b.rec.addedTables { |
||||
|
b.s.logf("table@build revert @%d", at.num) |
||||
|
if err := b.s.stor.Remove(storage.FileDesc{Type: storage.TypeTable, Num: at.num}); err != nil { |
||||
|
return err |
||||
|
} |
||||
|
} |
||||
|
return nil |
||||
|
} |
||||
|
|
||||
|
// tableCompaction executes the given table compaction. A "trivial"
// compaction is handled by simply moving the single input table one
// level down; otherwise the inputs are merged via
// tableCompactionBuilder and the resulting session record is committed.
func (db *DB) tableCompaction(c *compaction, noTrivial bool) {
	defer c.release()

	rec := &sessionRecord{}
	rec.addCompPtr(c.sourceLevel, c.imax)

	if !noTrivial && c.trivial() {
		// Move-only compaction: re-point the input table to the next
		// level without rewriting it.
		t := c.levels[0][0]
		db.logf("table@move L%d@%d -> L%d", c.sourceLevel, t.fd.Num, c.sourceLevel+1)
		rec.delTable(c.sourceLevel, t.fd.Num)
		rec.addTableFile(c.sourceLevel+1, t)
		db.compactionCommit("table-move", rec)
		return
	}

	var stats [2]cStatStaging
	for i, tables := range c.levels {
		for _, t := range tables {
			stats[i].read += t.size
			// Insert deleted tables into record
			rec.delTable(c.sourceLevel+i, t.fd.Num)
		}
	}
	sourceSize := int(stats[0].read + stats[1].read)
	minSeq := db.minSeq()
	db.logf("table@compaction L%d·%d -> L%d·%d S·%s Q·%d", c.sourceLevel, len(c.levels[0]), c.sourceLevel+1, len(c.levels[1]), shortenb(sourceSize), minSeq)

	b := &tableCompactionBuilder{
		db:        db,
		s:         db.s,
		c:         c,
		rec:       rec,
		stat1:     &stats[1],
		minSeq:    minSeq,
		strict:    db.s.o.GetStrict(opt.StrictCompaction),
		tableSize: db.s.o.GetCompactionTableSize(c.sourceLevel + 1),
	}
	// Runs b.run with retry/backoff; b.revert cleans up failed attempts.
	db.compactionTransact("table@build", b)

	// Commit.
	stats[1].startTimer()
	db.compactionCommit("table", rec)
	stats[1].stopTimer()

	resultSize := int(stats[1].write)
	db.logf("table@compaction committed F%s S%s Ke·%d D·%d T·%v", sint(len(rec.addedTables)-len(rec.deletedTables)), sshortenb(resultSize-sourceSize), b.kerrCnt, b.dropCnt, stats[1].duration)

	// Save compaction stats
	for i := range stats {
		db.compStats.addStat(c.sourceLevel+1, &stats[i])
	}
}
||||
|
|
||||
|
// tableRangeCompaction compacts all tables whose key range overlaps
// [umin, umax]. With level >= 0 only that level is compacted, once;
// with a negative level every level up to the deepest overlapping one
// is compacted, repeatedly, until a full pass finds nothing to do.
// Always returns nil.
func (db *DB) tableRangeCompaction(level int, umin, umax []byte) error {
	db.logf("table@compaction range L%d %q:%q", level, umin, umax)
	if level >= 0 {
		if c := db.s.getCompactionRange(level, umin, umax, true); c != nil {
			db.tableCompaction(c, true)
		}
	} else {
		// Retry until nothing to compact.
		for {
			compacted := false

			// Scan for maximum level with overlapped tables.
			v := db.s.version()
			m := 1
			for i := m; i < len(v.levels); i++ {
				tables := v.levels[i]
				if tables.overlaps(db.s.icmp, umin, umax, false) {
					m = i
				}
			}
			v.release()

			for level := 0; level < m; level++ {
				if c := db.s.getCompactionRange(level, umin, umax, false); c != nil {
					db.tableCompaction(c, true)
					compacted = true
				}
			}

			if !compacted {
				break
			}
		}
	}

	return nil
}
||||
|
|
||||
|
func (db *DB) tableAutoCompaction() { |
||||
|
if c := db.s.pickCompaction(); c != nil { |
||||
|
db.tableCompaction(c, false) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
func (db *DB) tableNeedCompaction() bool { |
||||
|
v := db.s.version() |
||||
|
defer v.release() |
||||
|
return v.needCompaction() |
||||
|
} |
||||
|
|
||||
|
// pauseCompaction acknowledges a pause request by signalling ch and
// returns once the pauser has received the signal. If the DB closes
// while waiting, the current compaction transaction is aborted.
func (db *DB) pauseCompaction(ch chan<- struct{}) {
	select {
	case ch <- struct{}{}:
	case <-db.closeC:
		db.compactionExitTransact()
	}
}

// cCmd is a command sent to a compaction goroutine; ack reports the
// command's outcome back to the requester.
type cCmd interface {
	ack(err error)
}
||||
|
|
||||
|
// cAuto requests an automatic compaction. ackC is nil for
// fire-and-forget triggers (see compTrigger).
type cAuto struct {
	ackC chan<- error
}

// ack delivers the command's outcome, tolerating a requester that has
// stopped listening.
func (r cAuto) ack(err error) {
	if r.ackC != nil {
		// The requester (see compTriggerWait) closes ackC when it stops
		// waiting; recover swallows the resulting send-on-closed-channel
		// panic so a late ack is harmless.
		defer func() {
			recover()
		}()
		r.ackC <- err
	}
}

// cRange requests a range compaction of [min, max] at the given level;
// a negative level means all levels (see tableRangeCompaction).
type cRange struct {
	level    int
	min, max []byte
	ackC     chan<- error
}

// ack delivers the command's outcome; see cAuto.ack for why the panic
// from a closed ackC is recovered.
func (r cRange) ack(err error) {
	if r.ackC != nil {
		defer func() {
			recover()
		}()
		r.ackC <- err
	}
}
||||
|
|
||||
|
// This will trigger auto compaction but will not wait for it.
// The send is non-blocking: if the compaction goroutine is busy, a
// trigger is effectively already pending, so dropping this one is fine.
func (db *DB) compTrigger(compC chan<- cCmd) {
	select {
	case compC <- cAuto{}:
	default:
	}
}

// This will trigger auto compaction and/or wait for all compaction to be done.
// Returns ErrClosed if the DB closes while sending or waiting, or any
// pending compaction error.
func (db *DB) compTriggerWait(compC chan<- cCmd) (err error) {
	ch := make(chan error)
	defer close(ch) // a late ack after this close is absorbed by cAuto.ack's recover
	// Send cmd.
	select {
	case compC <- cAuto{ch}:
	case err = <-db.compErrC:
		return
	case <-db.closeC:
		return ErrClosed
	}
	// Wait cmd.
	select {
	case err = <-ch:
	case err = <-db.compErrC:
	case <-db.closeC:
		return ErrClosed
	}
	return err
}
||||
|
|
||||
|
// Send range compaction request.
|
||||
|
func (db *DB) compTriggerRange(compC chan<- cCmd, level int, min, max []byte) (err error) { |
||||
|
ch := make(chan error) |
||||
|
defer close(ch) |
||||
|
// Send cmd.
|
||||
|
select { |
||||
|
case compC <- cRange{level, min, max, ch}: |
||||
|
case err := <-db.compErrC: |
||||
|
return err |
||||
|
case <-db.closeC: |
||||
|
return ErrClosed |
||||
|
} |
||||
|
// Wait cmd.
|
||||
|
select { |
||||
|
case err = <-ch: |
||||
|
case err = <-db.compErrC: |
||||
|
case <-db.closeC: |
||||
|
return ErrClosed |
||||
|
} |
||||
|
return err |
||||
|
} |
||||
|
|
||||
|
// mCompaction is the memdb-compaction goroutine: it services cAuto
// commands on mcompCmdC by flushing the frozen memdb until the DB is
// closed. A panic with errCompactionTransactExiting (raised by
// compactionExitTransact) is treated as a clean shutdown; any other
// panic is re-raised.
func (db *DB) mCompaction() {
	var x cCmd

	defer func() {
		if x := recover(); x != nil {
			if x != errCompactionTransactExiting {
				panic(x)
			}
		}
		// Ack an in-flight command with ErrClosed so its requester
		// doesn't block forever.
		if x != nil {
			x.ack(ErrClosed)
		}
		db.closeW.Done()
	}()

	for {
		select {
		case x = <-db.mcompCmdC:
			switch x.(type) {
			case cAuto:
				db.memCompaction()
				x.ack(nil)
				x = nil
			default:
				panic("leveldb: unknown command")
			}
		case <-db.closeC:
			return
		}
	}
}
||||
|
|
||||
|
// tCompaction is the table-compaction goroutine. It alternates between
// servicing commands on tcompCmdC and running auto compactions while
// the version needs them. cAuto acks are queued (ackQ) and delivered
// only once no further compaction is needed, so waiters observe a
// fully-compacted state; cRange commands run synchronously. Pause and
// close requests are honored at every iteration, and a panic with
// errCompactionTransactExiting is treated as a clean shutdown.
func (db *DB) tCompaction() {
	var x cCmd
	var ackQ []cCmd

	defer func() {
		if x := recover(); x != nil {
			if x != errCompactionTransactExiting {
				panic(x)
			}
		}
		// Fail all queued and in-flight commands so waiters unblock.
		for i := range ackQ {
			ackQ[i].ack(ErrClosed)
			ackQ[i] = nil
		}
		if x != nil {
			x.ack(ErrClosed)
		}
		db.closeW.Done()
	}()

	for {
		if db.tableNeedCompaction() {
			// Compaction pending: poll for commands without blocking so
			// the auto compaction below keeps making progress.
			select {
			case x = <-db.tcompCmdC:
			case ch := <-db.tcompPauseC:
				db.pauseCompaction(ch)
				continue
			case <-db.closeC:
				return
			default:
			}
		} else {
			// Nothing left to compact: release queued waiters, then
			// block until something happens.
			for i := range ackQ {
				ackQ[i].ack(nil)
				ackQ[i] = nil
			}
			ackQ = ackQ[:0]
			select {
			case x = <-db.tcompCmdC:
			case ch := <-db.tcompPauseC:
				db.pauseCompaction(ch)
				continue
			case <-db.closeC:
				return
			}
		}
		if x != nil {
			switch cmd := x.(type) {
			case cAuto:
				// Ack deferred until the idle branch above runs.
				ackQ = append(ackQ, x)
			case cRange:
				x.ack(db.tableRangeCompaction(cmd.level, cmd.min, cmd.max))
			default:
				panic("leveldb: unknown command")
			}
			x = nil
		}
		db.tableAutoCompaction()
	}
}
@ -0,0 +1,360 @@ |
|||||
|
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
|
||||
|
// All rights reserved.
|
||||
|
//
|
||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||
|
// found in the LICENSE file.
|
||||
|
|
||||
|
package leveldb |
||||
|
|
||||
|
import ( |
||||
|
"errors" |
||||
|
"math/rand" |
||||
|
"runtime" |
||||
|
"sync" |
||||
|
"sync/atomic" |
||||
|
|
||||
|
"github.com/syndtr/goleveldb/leveldb/iterator" |
||||
|
"github.com/syndtr/goleveldb/leveldb/opt" |
||||
|
"github.com/syndtr/goleveldb/leveldb/util" |
||||
|
) |
||||
|
|
||||
|
var ( |
||||
|
errInvalidInternalKey = errors.New("leveldb: Iterator: invalid internal key") |
||||
|
) |
||||
|
|
||||
|
// memdbReleaser adapts a ref-counted memDB to the util.Releaser
// interface, ensuring decref runs at most once no matter how many
// times Release is called.
type memdbReleaser struct {
	once sync.Once
	m    *memDB
}

// Release drops the memdb reference exactly once.
func (mr *memdbReleaser) Release() {
	mr.once.Do(func() {
		mr.m.decref()
	})
}
||||
|
|
||||
|
// newRawIterator builds a merged internal-key iterator over every data
// source: the optional auxiliary memdb and tables, the effective and
// frozen memdbs, and all table levels of the current version. Each
// memdb iterator carries a memdbReleaser and the merged iterator holds
// the version reference, so everything is pinned until Release.
func (db *DB) newRawIterator(auxm *memDB, auxt tFiles, slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
	strict := opt.GetStrict(db.s.o.Options, ro, opt.StrictReader)
	em, fm := db.getMems()
	v := db.s.version()

	tableIts := v.getIterators(slice, ro)
	// +3: auxiliary memdb, effective memdb, frozen memdb.
	n := len(tableIts) + len(auxt) + 3
	its := make([]iterator.Iterator, 0, n)

	if auxm != nil {
		ami := auxm.NewIterator(slice)
		ami.SetReleaser(&memdbReleaser{m: auxm})
		its = append(its, ami)
	}
	for _, t := range auxt {
		its = append(its, v.s.tops.newIterator(t, slice, ro))
	}

	emi := em.NewIterator(slice)
	emi.SetReleaser(&memdbReleaser{m: em})
	its = append(its, emi)
	if fm != nil {
		fmi := fm.NewIterator(slice)
		fmi.SetReleaser(&memdbReleaser{m: fm})
		its = append(its, fmi)
	}
	its = append(its, tableIts...)
	mi := iterator.NewMergedIterator(its, db.s.icmp, strict)
	mi.SetReleaser(&versionReleaser{v: v})
	return mi
}
||||
|
|
||||
|
// newIterator wraps a raw merged iterator in a dbIter that resolves
// internal keys into user-visible entries at the given sequence number.
// User-key slice bounds are first converted to internal-key bounds.
// A finalizer releases leaked iterators whose callers forgot Release.
func (db *DB) newIterator(auxm *memDB, auxt tFiles, seq uint64, slice *util.Range, ro *opt.ReadOptions) *dbIter {
	var islice *util.Range
	if slice != nil {
		islice = &util.Range{}
		if slice.Start != nil {
			islice.Start = makeInternalKey(nil, slice.Start, keyMaxSeq, keyTypeSeek)
		}
		if slice.Limit != nil {
			islice.Limit = makeInternalKey(nil, slice.Limit, keyMaxSeq, keyTypeSeek)
		}
	}
	rawIter := db.newRawIterator(auxm, auxt, islice, ro)
	iter := &dbIter{
		db:     db,
		icmp:   db.s.icmp,
		iter:   rawIter,
		seq:    seq,
		strict: opt.GetStrict(db.s.o.Options, ro, opt.StrictReader),
		key:    make([]byte, 0),
		value:  make([]byte, 0),
	}
	atomic.AddInt32(&db.aliveIters, 1)
	runtime.SetFinalizer(iter, (*dbIter).Release)
	return iter
}
||||
|
|
||||
|
func (db *DB) iterSamplingRate() int { |
||||
|
return rand.Intn(2 * db.s.o.GetIteratorSamplingRate()) |
||||
|
} |
||||
|
|
||||
|
// dir encodes the iterator's position/direction state.
type dir int

const (
	dirReleased dir = iota - 1 // iterator has been released; unusable
	dirSOI                     // before the first entry
	dirEOI                     // after the last entry
	dirBackward                // positioned by a backward move
	dirForward                 // positioned by a forward move
)

// dbIter represents iterator state over a database session. It filters
// the raw internal-key iterator down to the newest version of each
// user key visible at seq, skipping deletion markers.
type dbIter struct {
	db     *DB
	icmp   *iComparer
	iter   iterator.Iterator // merged internal-key iterator
	seq    uint64            // visibility horizon (snapshot sequence)
	strict bool              // propagate key-corruption errors if true

	smaplingGap int // (sic) remaining byte budget before the next read sample
	dir         dir
	key         []byte // current user key (copied out of iter)
	value       []byte // current value (copied out of iter)
	err         error
	releaser    util.Releaser
}
||||
|
|
||||
|
// sampleSeek charges the current entry's size against the sampling
// budget and, whenever the budget runs out, records a read sample for
// the key (which may in turn trigger a table compaction).
func (i *dbIter) sampleSeek() {
	ikey := i.iter.Key()
	i.smaplingGap -= len(ikey) + len(i.iter.Value())
	for i.smaplingGap < 0 {
		i.smaplingGap += i.db.iterSamplingRate()
		i.db.sampleSeek(ikey)
	}
}

// setErr puts the iterator into a permanent error state.
func (i *dbIter) setErr(err error) {
	i.err = err
	i.key = nil
	i.value = nil
}

// iterErr promotes any error from the underlying iterator.
func (i *dbIter) iterErr() {
	if err := i.iter.Error(); err != nil {
		i.setErr(err)
	}
}
||||
|
|
||||
|
// Valid reports whether the iterator is positioned on a usable entry.
func (i *dbIter) Valid() bool {
	return i.err == nil && i.dir > dirEOI
}

// First moves to the first visible entry. Returns false on error,
// after release, or when the view is empty.
func (i *dbIter) First() bool {
	if i.err != nil {
		return false
	} else if i.dir == dirReleased {
		i.err = ErrIterReleased
		return false
	}

	if i.iter.First() {
		i.dir = dirSOI
		return i.next()
	}
	i.dir = dirEOI
	i.iterErr()
	return false
}
||||
|
|
||||
|
// Last moves to the last visible entry. Returns false on error, after
// release, or when the view is empty.
func (i *dbIter) Last() bool {
	if i.err != nil {
		return false
	} else if i.dir == dirReleased {
		i.err = ErrIterReleased
		return false
	}

	if i.iter.Last() {
		return i.prev()
	}
	i.dir = dirSOI
	i.iterErr()
	return false
}

// Seek moves to the first visible entry whose user key is at or after
// key. The seek uses keyMaxSeq/keyTypeSeek so every version of the
// target key sorts after the probe.
func (i *dbIter) Seek(key []byte) bool {
	if i.err != nil {
		return false
	} else if i.dir == dirReleased {
		i.err = ErrIterReleased
		return false
	}

	ikey := makeInternalKey(nil, key, i.seq, keyTypeSeek)
	if i.iter.Seek(ikey) {
		i.dir = dirSOI
		return i.next()
	}
	i.dir = dirEOI
	i.iterErr()
	return false
}
||||
|
|
||||
|
// next scans forward from the current raw position for the first entry
// that is visible at i.seq, is not a deletion, and has a user key
// greater than the one last remembered. On success key/value hold
// copies of the entry and true is returned.
func (i *dbIter) next() bool {
	for {
		if ukey, seq, kt, kerr := parseInternalKey(i.iter.Key()); kerr == nil {
			i.sampleSeek()
			if seq <= i.seq {
				switch kt {
				case keyTypeDel:
					// Skip deleted key.
					// Remember it so older versions of the same user
					// key are skipped as well.
					i.key = append(i.key[:0], ukey...)
					i.dir = dirForward
				case keyTypeVal:
					if i.dir == dirSOI || i.icmp.uCompare(ukey, i.key) > 0 {
						i.key = append(i.key[:0], ukey...)
						i.value = append(i.value[:0], i.iter.Value()...)
						i.dir = dirForward
						return true
					}
				}
			}
		} else if i.strict {
			i.setErr(kerr)
			break
		}
		if !i.iter.Next() {
			i.dir = dirEOI
			i.iterErr()
			break
		}
	}
	return false
}

// Next moves to the next visible entry.
func (i *dbIter) Next() bool {
	if i.dir == dirEOI || i.err != nil {
		return false
	} else if i.dir == dirReleased {
		i.err = ErrIterReleased
		return false
	}

	// NOTE(review): the extra Next when dir == dirBackward appears to
	// compensate for prev() leaving the raw cursor one entry behind the
	// returned one — confirm against prev().
	if !i.iter.Next() || (i.dir == dirBackward && !i.iter.Next()) {
		i.dir = dirEOI
		i.iterErr()
		return false
	}
	return i.next()
}
||||
|
|
||||
|
// prev scans backward, capturing the newest visible non-deleted
// version of each user key it passes; it returns true as soon as the
// scan moves onto an older user key, leaving key/value holding the
// captured entry. Returns false when every candidate was deleted or
// the start was reached.
func (i *dbIter) prev() bool {
	i.dir = dirBackward
	del := true
	if i.iter.Valid() {
		for {
			if ukey, seq, kt, kerr := parseInternalKey(i.iter.Key()); kerr == nil {
				i.sampleSeek()
				if seq <= i.seq {
					if !del && i.icmp.uCompare(ukey, i.key) < 0 {
						// Crossed onto an older user key; the captured
						// entry is the answer.
						return true
					}
					del = (kt == keyTypeDel)
					if !del {
						i.key = append(i.key[:0], ukey...)
						i.value = append(i.value[:0], i.iter.Value()...)
					}
				}
			} else if i.strict {
				i.setErr(kerr)
				return false
			}
			if !i.iter.Prev() {
				break
			}
		}
	}
	if del {
		// Nothing usable before this point.
		i.dir = dirSOI
		i.iterErr()
		return false
	}
	return true
}

// Prev moves to the previous visible entry.
func (i *dbIter) Prev() bool {
	if i.dir == dirSOI || i.err != nil {
		return false
	} else if i.dir == dirReleased {
		i.err = ErrIterReleased
		return false
	}

	switch i.dir {
	case dirEOI:
		return i.Last()
	case dirForward:
		// Rewind the raw cursor past every remaining version of the
		// current user key before scanning backward.
		for i.iter.Prev() {
			if ukey, _, _, kerr := parseInternalKey(i.iter.Key()); kerr == nil {
				i.sampleSeek()
				if i.icmp.uCompare(ukey, i.key) < 0 {
					goto cont
				}
			} else if i.strict {
				i.setErr(kerr)
				return false
			}
		}
		i.dir = dirSOI
		i.iterErr()
		return false
	}

cont:
	return i.prev()
}
||||
|
|
||||
|
func (i *dbIter) Key() []byte { |
||||
|
if i.err != nil || i.dir <= dirEOI { |
||||
|
return nil |
||||
|
} |
||||
|
return i.key |
||||
|
} |
||||
|
|
||||
|
func (i *dbIter) Value() []byte { |
||||
|
if i.err != nil || i.dir <= dirEOI { |
||||
|
return nil |
||||
|
} |
||||
|
return i.value |
||||
|
} |
||||
|
|
||||
|
// Release frees all resources held by the iterator (including the
// underlying version and memdb references held through the raw
// iterator) and makes every subsequent call fail. Idempotent.
func (i *dbIter) Release() {
	if i.dir != dirReleased {
		// Clear the finalizer.
		runtime.SetFinalizer(i, nil)

		if i.releaser != nil {
			i.releaser.Release()
			i.releaser = nil
		}

		i.dir = dirReleased
		i.key = nil
		i.value = nil
		i.iter.Release()
		i.iter = nil
		atomic.AddInt32(&i.db.aliveIters, -1)
		i.db = nil
	}
}

// SetReleaser attaches an extra releaser to run on Release. Panics if
// the iterator is already released or already has a releaser.
func (i *dbIter) SetReleaser(releaser util.Releaser) {
	if i.dir == dirReleased {
		panic(util.ErrReleased)
	}
	if i.releaser != nil && releaser != nil {
		panic(util.ErrHasReleaser)
	}
	i.releaser = releaser
}

// Error returns the first error the iterator encountered, if any.
func (i *dbIter) Error() error {
	return i.err
}
@ -0,0 +1,183 @@ |
|||||
|
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
|
||||
|
// All rights reserved.
|
||||
|
//
|
||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||
|
// found in the LICENSE file.
|
||||
|
|
||||
|
package leveldb |
||||
|
|
||||
|
import ( |
||||
|
"container/list" |
||||
|
"fmt" |
||||
|
"runtime" |
||||
|
"sync" |
||||
|
"sync/atomic" |
||||
|
|
||||
|
"github.com/syndtr/goleveldb/leveldb/iterator" |
||||
|
"github.com/syndtr/goleveldb/leveldb/opt" |
||||
|
"github.com/syndtr/goleveldb/leveldb/util" |
||||
|
) |
||||
|
|
||||
|
// snapshotElement is one node in the DB's ordered list of live
// snapshots; ref counts how many Snapshot handles share it.
type snapshotElement struct {
	seq uint64
	ref int
	e   *list.Element
}

// Acquires a snapshot, based on latest sequence.
// Snapshots taken at the same sequence share a single element, and the
// list stays sorted by seq because sequence numbers only grow (the
// panic guards that invariant).
func (db *DB) acquireSnapshot() *snapshotElement {
	db.snapsMu.Lock()
	defer db.snapsMu.Unlock()

	seq := db.getSeq()

	if e := db.snapsList.Back(); e != nil {
		se := e.Value.(*snapshotElement)
		if se.seq == seq {
			se.ref++
			return se
		} else if seq < se.seq {
			panic("leveldb: sequence number is not increasing")
		}
	}
	se := &snapshotElement{seq: seq, ref: 1}
	se.e = db.snapsList.PushBack(se)
	return se
}
||||
|
|
||||
|
// Releases given snapshot element.
// The element is removed from the list once its last reference is
// dropped; a negative count indicates a double release and panics.
func (db *DB) releaseSnapshot(se *snapshotElement) {
	db.snapsMu.Lock()
	defer db.snapsMu.Unlock()

	se.ref--
	if se.ref == 0 {
		db.snapsList.Remove(se.e)
		se.e = nil
	} else if se.ref < 0 {
		panic("leveldb: Snapshot: negative element reference")
	}
}

// Gets minimum sequence that not being snapshotted.
// Compaction uses this as the horizon below which shadowed entries may
// be dropped (see tableCompactionBuilder.minSeq). The oldest snapshot
// is at the front of the list; with no snapshots, the current sequence
// is the minimum.
func (db *DB) minSeq() uint64 {
	db.snapsMu.Lock()
	defer db.snapsMu.Unlock()

	if e := db.snapsList.Front(); e != nil {
		return e.Value.(*snapshotElement).seq
	}

	return db.getSeq()
}
||||
|
|
||||
|
// Snapshot is a DB snapshot.
// It provides a consistent read-only view of the DB at the sequence
// number captured in elem. mu guards released.
type Snapshot struct {
	db       *DB
	elem     *snapshotElement
	mu       sync.RWMutex
	released bool
}

// Creates new snapshot object.
// A finalizer releases the snapshot if the caller forgets to.
func (db *DB) newSnapshot() *Snapshot {
	snap := &Snapshot{
		db:   db,
		elem: db.acquireSnapshot(),
	}
	atomic.AddInt32(&db.aliveSnaps, 1)
	runtime.SetFinalizer(snap, (*Snapshot).Release)
	return snap
}

// String identifies the snapshot by its sequence number.
func (snap *Snapshot) String() string {
	return fmt.Sprintf("leveldb.Snapshot{%d}", snap.elem.seq)
}
||||
|
|
||||
|
// Get gets the value for the given key. It returns ErrNotFound if
// the DB does not contains the key.
//
// The caller should not modify the contents of the returned slice, but
// it is safe to modify the contents of the argument after Get returns.
func (snap *Snapshot) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) {
	err = snap.db.ok()
	if err != nil {
		return
	}
	snap.mu.RLock()
	defer snap.mu.RUnlock()
	if snap.released {
		err = ErrSnapshotReleased
		return
	}
	// Read at the snapshot's captured sequence number.
	return snap.db.get(nil, nil, key, snap.elem.seq, ro)
}

// Has returns true if the DB does contains the given key.
//
// It is safe to modify the contents of the argument after Get returns.
func (snap *Snapshot) Has(key []byte, ro *opt.ReadOptions) (ret bool, err error) {
	err = snap.db.ok()
	if err != nil {
		return
	}
	snap.mu.RLock()
	defer snap.mu.RUnlock()
	if snap.released {
		err = ErrSnapshotReleased
		return
	}
	return snap.db.has(nil, nil, key, snap.elem.seq, ro)
}
||||
|
|
||||
|
// NewIterator returns an iterator for the snapshot of the underlying DB.
// The returned iterator is not safe for concurrent use, but it is safe to use
// multiple iterators concurrently, with each in a dedicated goroutine.
// It is also safe to use an iterator concurrently with modifying its
// underlying DB. The resultant key/value pairs are guaranteed to be
// consistent.
//
// Slice allows slicing the iterator to only contains keys in the given
// range. A nil Range.Start is treated as a key before all keys in the
// DB. And a nil Range.Limit is treated as a key after all keys in
// the DB.
//
// The iterator must be released after use, by calling Release method.
// Releasing the snapshot doesn't mean releasing the iterator too, the
// iterator would be still valid until released.
//
// Also read Iterator documentation of the leveldb/iterator package.
func (snap *Snapshot) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
	if err := snap.db.ok(); err != nil {
		return iterator.NewEmptyIterator(err)
	}
	snap.mu.Lock()
	defer snap.mu.Unlock()
	if snap.released {
		return iterator.NewEmptyIterator(ErrSnapshotReleased)
	}
	// Since iterator already hold version ref, it doesn't need to
	// hold snapshot ref.
	return snap.db.newIterator(nil, nil, snap.elem.seq, slice, ro)
}
||||
|
|
||||
|
// Release releases the snapshot. This will not release any returned
// iterators, the iterators would still be valid until released or the
// underlying DB is closed.
//
// Other methods should not be called after the snapshot has been released.
// Idempotent: a second call is a no-op.
func (snap *Snapshot) Release() {
	snap.mu.Lock()
	defer snap.mu.Unlock()

	if !snap.released {
		// Clear the finalizer.
		runtime.SetFinalizer(snap, nil)

		snap.released = true
		snap.db.releaseSnapshot(snap.elem)
		atomic.AddInt32(&snap.db.aliveSnaps, -1)
		snap.db = nil
		snap.elem = nil
	}
}
@ -0,0 +1,234 @@ |
|||||
|
// Copyright (c) 2013, Suryandaru Triandana <syndtr@gmail.com>
|
||||
|
// All rights reserved.
|
||||
|
//
|
||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||
|
// found in the LICENSE file.
|
||||
|
|
||||
|
package leveldb |
||||
|
|
||||
|
import ( |
||||
|
"sync/atomic" |
||||
|
"time" |
||||
|
|
||||
|
"github.com/syndtr/goleveldb/leveldb/journal" |
||||
|
"github.com/syndtr/goleveldb/leveldb/memdb" |
||||
|
"github.com/syndtr/goleveldb/leveldb/storage" |
||||
|
) |
||||
|
|
||||
|
// memDB is a reference-counted wrapper around a memdb.DB owned by a DB.
type memDB struct {
	db *DB
	*memdb.DB
	ref int32 // reference count, manipulated atomically
}

// getref returns the current reference count.
func (m *memDB) getref() int32 {
	return atomic.LoadInt32(&m.ref)
}

// incref increments the reference count.
func (m *memDB) incref() {
	atomic.AddInt32(&m.ref, 1)
}

// decref decrements the reference count. When it reaches zero the
// underlying memdb is recycled into the DB's pool (only if it still has
// the standard write-buffer capacity) and the wrapper's pointers are
// cleared. A negative count indicates a programming error and panics.
func (m *memDB) decref() {
	if ref := atomic.AddInt32(&m.ref, -1); ref == 0 {
		// Only put back memdb with std capacity.
		if m.Capacity() == m.db.s.o.GetWriteBuffer() {
			m.Reset()
			m.db.mpoolPut(m.DB)
		}
		m.db = nil
		m.DB = nil
	} else if ref < 0 {
		panic("negative memdb ref")
	}
}
||||
|
|
||||
|
// getSeq returns the latest sequence number (atomic load).
func (db *DB) getSeq() uint64 {
	return atomic.LoadUint64(&db.seq)
}

// addSeq atomically adds delta to the sequence number.
func (db *DB) addSeq(delta uint64) {
	atomic.AddUint64(&db.seq, delta)
}

// setSeq atomically stores seq as the current sequence number.
func (db *DB) setSeq(seq uint64) {
	atomic.StoreUint64(&db.seq, seq)
}
||||
|
|
||||
|
// sampleSeek records a seek sample for ikey against the current
// version; if sampling decides a table is seeked too often, a table
// compaction is triggered.
func (db *DB) sampleSeek(ikey internalKey) {
	v := db.s.version()
	if v.sampleSeek(ikey) {
		// Trigger table compaction.
		db.compTrigger(db.tcompCmdC)
	}
	v.release()
}
||||
|
|
||||
|
// mpoolPut offers mem back to the memdb pool. The send is non-blocking:
// the memdb is simply dropped (left for GC) when the pool is full or
// the DB is closed.
func (db *DB) mpoolPut(mem *memdb.DB) {
	if !db.isClosed() {
		select {
		case db.memPool <- mem:
		default:
		}
	}
}
||||
|
|
||||
|
// mpoolGet returns a memDB wrapper with capacity of at least n bytes,
// reusing a pooled memdb when one is available and large enough,
// otherwise allocating a fresh one. The returned wrapper starts with a
// zero reference count; callers must incref it.
func (db *DB) mpoolGet(n int) *memDB {
	var mdb *memdb.DB
	// Non-blocking receive: fall through with nil when the pool is empty.
	select {
	case mdb = <-db.memPool:
	default:
	}
	if mdb == nil || mdb.Capacity() < n {
		mdb = memdb.New(db.s.icmp, maxInt(db.s.o.GetWriteBuffer(), n))
	}
	return &memDB{
		db: db,
		DB: mdb,
	}
}
||||
|
|
||||
|
// mpoolDrain discards at most one pooled memdb every 30 seconds so an
// idle DB does not pin memory indefinitely. It exits — draining and
// closing the pool — when the DB is closed.
func (db *DB) mpoolDrain() {
	ticker := time.NewTicker(30 * time.Second)
	for {
		select {
		case <-ticker.C:
			// Drop at most one pooled memdb per tick.
			select {
			case <-db.memPool:
			default:
			}
		case <-db.closeC:
			ticker.Stop()
			// Make sure the pool is drained.
			select {
			case <-db.memPool:
			case <-time.After(time.Second):
			}
			close(db.memPool)
			return
		}
	}
}
||||
|
|
||||
|
// newMem creates a new memdb (capacity of at least n bytes) and a new
// journal file, freezing the current memdb and journal; needs external
// synchronization — newMem is only called synchronously by the writer.
func (db *DB) newMem(n int) (mem *memDB, err error) {
	fd := storage.FileDesc{Type: storage.TypeJournal, Num: db.s.allocFileNum()}
	w, err := db.s.stor.Create(fd)
	if err != nil {
		// Give the allocated file number back on failure.
		db.s.reuseFileNum(fd.Num)
		return
	}

	db.memMu.Lock()
	defer db.memMu.Unlock()

	// Invariant: the previous frozen memdb must have been compacted and
	// dropped before a new rotation is allowed.
	if db.frozenMem != nil {
		panic("still has frozen mem")
	}

	if db.journal == nil {
		db.journal = journal.NewWriter(w)
	} else {
		// Reuse the journal writer machinery, close the old file and
		// remember it as the frozen journal.
		db.journal.Reset(w)
		db.journalWriter.Close()
		db.frozenJournalFd = db.journalFd
	}
	db.journalWriter = w
	db.journalFd = fd
	db.frozenMem = db.mem
	mem = db.mpoolGet(n)
	mem.incref() // for self
	mem.incref() // for caller
	db.mem = mem
	// The seq only incremented by the writer. And whoever called newMem
	// should hold write lock, so no need additional synchronization here.
	db.frozenSeq = db.seq
	return
}
||||
|
|
||||
|
// getMems returns the effective (e) and frozen (f) memdbs, each with an
// extra reference taken; f may be nil when nothing is frozen.
func (db *DB) getMems() (e, f *memDB) {
	db.memMu.RLock()
	defer db.memMu.RUnlock()
	if db.mem != nil {
		db.mem.incref()
	} else if !db.isClosed() {
		// A live DB must always have an effective memdb.
		panic("nil effective mem")
	}
	if db.frozenMem != nil {
		db.frozenMem.incref()
	}
	return db.mem, db.frozenMem
}

// getEffectiveMem returns the effective memdb with an extra reference
// taken; nil is returned only when the DB has been closed.
func (db *DB) getEffectiveMem() *memDB {
	db.memMu.RLock()
	defer db.memMu.RUnlock()
	if db.mem != nil {
		db.mem.incref()
	} else if !db.isClosed() {
		panic("nil effective mem")
	}
	return db.mem
}
||||
|
|
||||
|
// hasFrozenMem reports whether a frozen memdb currently exists.
func (db *DB) hasFrozenMem() bool {
	db.memMu.RLock()
	defer db.memMu.RUnlock()
	return db.frozenMem != nil
}

// getFrozenMem returns the frozen memdb (nil when none) with an extra
// reference taken.
func (db *DB) getFrozenMem() *memDB {
	db.memMu.RLock()
	defer db.memMu.RUnlock()
	if db.frozenMem != nil {
		db.frozenMem.incref()
	}
	return db.frozenMem
}
||||
|
|
||||
|
// dropFrozenMem removes the frozen journal file and releases the frozen
// memdb; it assumes that the frozen memdb isn't nil.
func (db *DB) dropFrozenMem() {
	db.memMu.Lock()
	if err := db.s.stor.Remove(db.frozenJournalFd); err != nil {
		// Removal failure is logged but not fatal; the janitor can
		// clean the file up later.
		db.logf("journal@remove removing @%d %q", db.frozenJournalFd.Num, err)
	} else {
		db.logf("journal@remove removed @%d", db.frozenJournalFd.Num)
	}
	db.frozenJournalFd = storage.FileDesc{}
	db.frozenMem.decref()
	db.frozenMem = nil
	db.memMu.Unlock()
}

// clearMems clears the memdb pointers; used by DB.Close().
func (db *DB) clearMems() {
	db.memMu.Lock()
	db.mem = nil
	db.frozenMem = nil
	db.memMu.Unlock()
}
||||
|
|
||||
|
// setClosed sets the closed flag; it returns true only if the DB was
// not already closed (compare-and-swap from 0 to 1).
func (db *DB) setClosed() bool {
	return atomic.CompareAndSwapUint32(&db.closed, 0, 1)
}

// isClosed reports whether the DB has been closed.
func (db *DB) isClosed() bool {
	return atomic.LoadUint32(&db.closed) != 0
}

// ok returns ErrClosed if the DB has been closed, nil otherwise.
func (db *DB) ok() error {
	if db.isClosed() {
		return ErrClosed
	}
	return nil
}
2925
vendor/src/github.com/syndtr/goleveldb/leveldb/db_test.go
File diff suppressed because it is too large
View File
File diff suppressed because it is too large
View File
@ -0,0 +1,325 @@ |
|||||
|
// Copyright (c) 2016, Suryandaru Triandana <syndtr@gmail.com>
|
||||
|
// All rights reserved.
|
||||
|
//
|
||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||
|
// found in the LICENSE file.
|
||||
|
|
||||
|
package leveldb |
||||
|
|
||||
|
import ( |
||||
|
"errors" |
||||
|
"sync" |
||||
|
"time" |
||||
|
|
||||
|
"github.com/syndtr/goleveldb/leveldb/iterator" |
||||
|
"github.com/syndtr/goleveldb/leveldb/opt" |
||||
|
"github.com/syndtr/goleveldb/leveldb/util" |
||||
|
) |
||||
|
|
||||
|
var errTransactionDone = errors.New("leveldb: transaction already closed") |
||||
|
|
||||
|
// Transaction is the transaction handle.
type Transaction struct {
	db        *DB
	lk        sync.RWMutex  // guards all fields below
	seq       uint64        // sequence number local to this transaction
	mem       *memDB        // staging memdb for uncommitted records
	tables    tFiles        // tables flushed from mem, not yet committed
	ikScratch []byte        // scratch buffer for building internal keys
	rec       sessionRecord // session record accumulated for commit
	stats     cStatStaging
	closed    bool // set once committed or discarded
}
||||
|
|
||||
|
// Get gets the value for the given key. It returns ErrNotFound if the
// DB does not contains the key.
//
// The returned slice is its own copy, it is safe to modify the contents
// of the returned slice.
// It is safe to modify the contents of the argument after Get returns.
func (tr *Transaction) Get(key []byte, ro *opt.ReadOptions) ([]byte, error) {
	tr.lk.RLock()
	defer tr.lk.RUnlock()
	if tr.closed {
		return nil, errTransactionDone
	}
	// Read through the transaction's own memdb and staged tables.
	return tr.db.get(tr.mem.DB, tr.tables, key, tr.seq, ro)
}

// Has returns true if the DB does contains the given key.
//
// It is safe to modify the contents of the argument after Has returns.
func (tr *Transaction) Has(key []byte, ro *opt.ReadOptions) (bool, error) {
	tr.lk.RLock()
	defer tr.lk.RUnlock()
	if tr.closed {
		return false, errTransactionDone
	}
	return tr.db.has(tr.mem.DB, tr.tables, key, tr.seq, ro)
}
||||
|
|
||||
|
// NewIterator returns an iterator for the latest snapshot of the transaction.
// The returned iterator is not safe for concurrent use, but it is safe to use
// multiple iterators concurrently, with each in a dedicated goroutine.
// It is also safe to use an iterator concurrently while writes to the
// transaction. The resultant key/value pairs are guaranteed to be consistent.
//
// Slice allows slicing the iterator to only contains keys in the given
// range. A nil Range.Start is treated as a key before all keys in the
// DB. And a nil Range.Limit is treated as a key after all keys in
// the DB.
//
// The iterator must be released after use, by calling Release method.
//
// Also read Iterator documentation of the leveldb/iterator package.
func (tr *Transaction) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
	tr.lk.RLock()
	defer tr.lk.RUnlock()
	if tr.closed {
		return iterator.NewEmptyIterator(errTransactionDone)
	}
	// The iterator takes its own reference on the staging memdb so a
	// later flush() swapping tr.mem does not invalidate it.
	tr.mem.incref()
	return tr.db.newIterator(tr.mem, tr.tables, tr.seq, slice, ro)
}
||||
|
|
||||
|
// flush writes the staging memdb out as a new level-0 table, records it
// in tr.tables and tr.rec, then reuses the memdb (or swaps in a pooled
// one when live iterators still hold references to it).
func (tr *Transaction) flush() error {
	// Flush memdb.
	if tr.mem.Len() != 0 {
		tr.stats.startTimer()
		iter := tr.mem.NewIterator(nil)
		t, n, err := tr.db.s.tops.createFrom(iter)
		iter.Release()
		tr.stats.stopTimer()
		if err != nil {
			return err
		}
		if tr.mem.getref() == 1 {
			// Sole owner: safe to reuse the memdb in place.
			tr.mem.Reset()
		} else {
			// Iterators still reference it; leave it to them and
			// start with a fresh pooled memdb.
			tr.mem.decref()
			tr.mem = tr.db.mpoolGet(0)
			tr.mem.incref()
		}
		tr.tables = append(tr.tables, t)
		tr.rec.addTableFile(0, t)
		tr.stats.write += t.size
		tr.db.logf("transaction@flush created L0@%d N·%d S·%s %q:%q", t.fd.Num, n, shortenb(int(t.size)), t.imin, t.imax)
	}
	return nil
}
||||
|
|
||||
|
// put appends one record of type kt to the staging memdb, flushing the
// memdb to a table first when it cannot hold the record; tr.seq is
// advanced on success. Callers must hold tr.lk.
func (tr *Transaction) put(kt keyType, key, value []byte) error {
	tr.ikScratch = makeInternalKey(tr.ikScratch, key, tr.seq+1, kt)
	if tr.mem.Free() < len(tr.ikScratch)+len(value) {
		if err := tr.flush(); err != nil {
			return err
		}
	}
	if err := tr.mem.Put(tr.ikScratch, value); err != nil {
		return err
	}
	tr.seq++
	return nil
}
||||
|
|
||||
|
// Put sets the value for the given key. It overwrites any previous value
// for that key; a DB is not a multi-map.
// Please note that the transaction is not compacted until committed, so if you
// writes 10 same keys, then those 10 same keys are in the transaction.
//
// It is safe to modify the contents of the arguments after Put returns.
func (tr *Transaction) Put(key, value []byte, wo *opt.WriteOptions) error {
	tr.lk.Lock()
	defer tr.lk.Unlock()
	if tr.closed {
		return errTransactionDone
	}
	return tr.put(keyTypeVal, key, value)
}

// Delete deletes the value for the given key.
// Please note that the transaction is not compacted until committed, so if you
// writes 10 same keys, then those 10 same keys are in the transaction.
//
// It is safe to modify the contents of the arguments after Delete returns.
func (tr *Transaction) Delete(key []byte, wo *opt.WriteOptions) error {
	tr.lk.Lock()
	defer tr.lk.Unlock()
	if tr.closed {
		return errTransactionDone
	}
	return tr.put(keyTypeDel, key, nil)
}
||||
|
|
||||
|
// Write apply the given batch to the transaction. The batch will be applied
// sequentially.
// Please note that the transaction is not compacted until committed, so if you
// writes 10 same keys, then those 10 same keys are in the transaction.
//
// It is safe to modify the contents of the arguments after Write returns.
func (tr *Transaction) Write(b *Batch, wo *opt.WriteOptions) error {
	if b == nil || b.Len() == 0 {
		return nil
	}

	tr.lk.Lock()
	defer tr.lk.Unlock()
	if tr.closed {
		return errTransactionDone
	}
	// Replay every record of the batch into the transaction.
	return b.replayInternal(func(i int, kt keyType, k, v []byte) error {
		return tr.put(kt, k, v)
	})
}
||||
|
|
||||
|
// setDone marks the transaction closed, detaches it from the DB,
// releases the staging memdb and hands the DB write lock back.
// Callers must hold tr.lk.
func (tr *Transaction) setDone() {
	tr.closed = true
	tr.db.tr = nil
	tr.mem.decref()
	<-tr.db.writeLockC
}
||||
|
|
||||
|
// Commit commits the transaction. If error is not nil, then the transaction is
|
||||
|
// not committed, it can then either be retried or discarded.
|
||||
|
//
|
||||
|
// Other methods should not be called after transaction has been committed.
|
||||
|
func (tr *Transaction) Commit() error { |
||||
|
if err := tr.db.ok(); err != nil { |
||||
|
return err |
||||
|
} |
||||
|
|
||||
|
tr.lk.Lock() |
||||
|
defer tr.lk.Unlock() |
||||
|
if tr.closed { |
||||
|
return errTransactionDone |
||||
|
} |
||||
|
if err := tr.flush(); err != nil { |
||||
|
// Return error, lets user decide either to retry or discard
|
||||
|
// transaction.
|
||||
|
return err |
||||
|
} |
||||
|
if len(tr.tables) != 0 { |
||||
|
// Committing transaction.
|
||||
|
tr.rec.setSeqNum(tr.seq) |
||||
|
tr.db.compCommitLk.Lock() |
||||
|
tr.stats.startTimer() |
||||
|
var cerr error |
||||
|
for retry := 0; retry < 3; retry++ { |
||||
|
cerr = tr.db.s.commit(&tr.rec) |
||||
|
if cerr != nil { |
||||
|
tr.db.logf("transaction@commit error R·%d %q", retry, cerr) |
||||
|
select { |
||||
|
case <-time.After(time.Second): |
||||
|
case <-tr.db.closeC: |
||||
|
tr.db.logf("transaction@commit exiting") |
||||
|
tr.db.compCommitLk.Unlock() |
||||
|
return cerr |
||||
|
} |
||||
|
} else { |
||||
|
// Success. Set db.seq.
|
||||
|
tr.db.setSeq(tr.seq) |
||||
|
break |
||||
|
} |
||||
|
} |
||||
|
tr.stats.stopTimer() |
||||
|
if cerr != nil { |
||||
|
// Return error, lets user decide either to retry or discard
|
||||
|
// transaction.
|
||||
|
return cerr |
||||
|
} |
||||
|
|
||||
|
// Update compaction stats. This is safe as long as we hold compCommitLk.
|
||||
|
tr.db.compStats.addStat(0, &tr.stats) |
||||
|
|
||||
|
// Trigger table auto-compaction.
|
||||
|
tr.db.compTrigger(tr.db.tcompCmdC) |
||||
|
tr.db.compCommitLk.Unlock() |
||||
|
|
||||
|
// Additionally, wait compaction when certain threshold reached.
|
||||
|
// Ignore error, returns error only if transaction can't be committed.
|
||||
|
tr.db.waitCompaction() |
||||
|
} |
||||
|
// Only mark as done if transaction committed successfully.
|
||||
|
tr.setDone() |
||||
|
return nil |
||||
|
} |
||||
|
|
||||
|
// discard removes any tables created by this transaction and recycles
// their file numbers. Callers must hold tr.lk.
func (tr *Transaction) discard() {
	// Discard transaction.
	for _, t := range tr.tables {
		tr.db.logf("transaction@discard @%d", t.fd.Num)
		// Only recycle the file number if removal actually succeeded.
		if err1 := tr.db.s.stor.Remove(t.fd); err1 == nil {
			tr.db.s.reuseFileNum(t.fd.Num)
		}
	}
}

// Discard discards the transaction.
//
// Other methods should not be called after transaction has been discarded.
func (tr *Transaction) Discard() {
	tr.lk.Lock()
	if !tr.closed {
		tr.discard()
		tr.setDone()
	}
	tr.lk.Unlock()
}
||||
|
|
||||
|
// waitCompaction blocks on a table compaction when level-0 has reached
// the write-pause threshold; otherwise it returns immediately.
func (db *DB) waitCompaction() error {
	if db.s.tLen(0) >= db.s.o.GetWriteL0PauseTrigger() {
		return db.compTriggerWait(db.tcompCmdC)
	}
	return nil
}
||||
|
|
||||
|
// OpenTransaction opens an atomic DB transaction. Only one transaction can be
// opened at a time. Subsequent call to Write and OpenTransaction will be blocked
// until in-flight transaction is committed or discarded.
// The returned transaction handle is safe for concurrent use.
//
// Transaction is expensive and can overwhelm compaction, especially if
// transaction size is small. Use with caution.
//
// The transaction must be closed once done, either by committing or discarding
// the transaction.
// Closing the DB will discard open transaction.
func (db *DB) OpenTransaction() (*Transaction, error) {
	if err := db.ok(); err != nil {
		return nil, err
	}

	// The write happen synchronously.
	select {
	case db.writeLockC <- struct{}{}:
	case err := <-db.compPerErrC:
		return nil, err
	case <-db.closeC:
		return nil, ErrClosed
	}

	if db.tr != nil {
		panic("leveldb: has open transaction")
	}

	// Flush current memdb.
	// NOTE(review): the error paths below return while still holding
	// the write lock acquired above — confirm whether that is the
	// intended "DB is broken" behavior.
	if db.mem != nil && db.mem.Len() != 0 {
		if _, err := db.rotateMem(0, true); err != nil {
			return nil, err
		}
	}

	// Wait compaction when certain threshold reached.
	if err := db.waitCompaction(); err != nil {
		return nil, err
	}

	tr := &Transaction{
		db:  db,
		seq: db.seq,
		mem: db.mpoolGet(0),
	}
	tr.mem.incref()
	db.tr = tr
	return tr, nil
}
@ -0,0 +1,102 @@ |
|||||
|
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
|
||||
|
// All rights reserved.
|
||||
|
//
|
||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||
|
// found in the LICENSE file.
|
||||
|
|
||||
|
package leveldb |
||||
|
|
||||
|
import ( |
||||
|
"github.com/syndtr/goleveldb/leveldb/errors" |
||||
|
"github.com/syndtr/goleveldb/leveldb/iterator" |
||||
|
"github.com/syndtr/goleveldb/leveldb/opt" |
||||
|
"github.com/syndtr/goleveldb/leveldb/storage" |
||||
|
"github.com/syndtr/goleveldb/leveldb/util" |
||||
|
) |
||||
|
|
||||
|
// Reader is the interface that wraps basic Get and NewIterator methods.
// This interface implemented by both DB and Snapshot.
type Reader interface {
	Get(key []byte, ro *opt.ReadOptions) (value []byte, err error)
	NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator
}
||||
|
|
||||
|
// Sizes is a list of sizes.
type Sizes []int64

// Sum returns the total of all sizes in the list.
func (sizes Sizes) Sum() int64 {
	var total int64
	for _, s := range sizes {
		total += s
	}
	return total
}
||||
|
|
||||
|
// Logging helpers; both forward to the session logger.
func (db *DB) log(v ...interface{})                 { db.s.log(v...) }
func (db *DB) logf(format string, v ...interface{}) { db.s.logf(format, v...) }
||||
|
|
||||
|
// Check and clean files.
|
||||
|
func (db *DB) checkAndCleanFiles() error { |
||||
|
v := db.s.version() |
||||
|
defer v.release() |
||||
|
|
||||
|
tmap := make(map[int64]bool) |
||||
|
for _, tables := range v.levels { |
||||
|
for _, t := range tables { |
||||
|
tmap[t.fd.Num] = false |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
fds, err := db.s.stor.List(storage.TypeAll) |
||||
|
if err != nil { |
||||
|
return err |
||||
|
} |
||||
|
|
||||
|
var nt int |
||||
|
var rem []storage.FileDesc |
||||
|
for _, fd := range fds { |
||||
|
keep := true |
||||
|
switch fd.Type { |
||||
|
case storage.TypeManifest: |
||||
|
keep = fd.Num >= db.s.manifestFd.Num |
||||
|
case storage.TypeJournal: |
||||
|
if !db.frozenJournalFd.Zero() { |
||||
|
keep = fd.Num >= db.frozenJournalFd.Num |
||||
|
} else { |
||||
|
keep = fd.Num >= db.journalFd.Num |
||||
|
} |
||||
|
case storage.TypeTable: |
||||
|
_, keep = tmap[fd.Num] |
||||
|
if keep { |
||||
|
tmap[fd.Num] = true |
||||
|
nt++ |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
if !keep { |
||||
|
rem = append(rem, fd) |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
if nt != len(tmap) { |
||||
|
var mfds []storage.FileDesc |
||||
|
for num, present := range tmap { |
||||
|
if !present { |
||||
|
mfds = append(mfds, storage.FileDesc{storage.TypeTable, num}) |
||||
|
db.logf("db@janitor table missing @%d", num) |
||||
|
} |
||||
|
} |
||||
|
return errors.NewErrCorrupted(storage.FileDesc{}, &errors.ErrMissingFiles{Fds: mfds}) |
||||
|
} |
||||
|
|
||||
|
db.logf("db@janitor F·%d G·%d", len(fds), len(rem)) |
||||
|
for _, fd := range rem { |
||||
|
db.logf("db@janitor removing %s-%d", fd.Type, fd.Num) |
||||
|
if err := db.s.stor.Remove(fd); err != nil { |
||||
|
return err |
||||
|
} |
||||
|
} |
||||
|
return nil |
||||
|
} |
@ -0,0 +1,443 @@ |
|||||
|
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
|
||||
|
// All rights reserved.
|
||||
|
//
|
||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||
|
// found in the LICENSE file.
|
||||
|
|
||||
|
package leveldb |
||||
|
|
||||
|
import ( |
||||
|
"time" |
||||
|
|
||||
|
"github.com/syndtr/goleveldb/leveldb/memdb" |
||||
|
"github.com/syndtr/goleveldb/leveldb/opt" |
||||
|
"github.com/syndtr/goleveldb/leveldb/util" |
||||
|
) |
||||
|
|
||||
|
// writeJournal appends the given batches (sharing starting sequence
// number seq) as one journal record, flushes the journal, and — when
// sync is set — syncs the underlying journal file.
func (db *DB) writeJournal(batches []*Batch, seq uint64, sync bool) error {
	wr, err := db.journal.Next()
	if err != nil {
		return err
	}
	if err := writeBatchesWithHeader(wr, batches, seq); err != nil {
		return err
	}
	if err := db.journal.Flush(); err != nil {
		return err
	}
	if sync {
		return db.journalWriter.Sync()
	}
	return nil
}
||||
|
|
||||
|
// rotateMem freezes the current memdb/journal and installs fresh ones
// (capacity of at least n bytes), then schedules a memdb compaction —
// waiting for it to complete when wait is true. The returned memdb
// carries a reference the caller must release.
func (db *DB) rotateMem(n int, wait bool) (mem *memDB, err error) {
	// Wait for pending memdb compaction.
	err = db.compTriggerWait(db.mcompCmdC)
	if err != nil {
		return
	}

	// Create new memdb and journal.
	mem, err = db.newMem(n)
	if err != nil {
		return
	}

	// Schedule memdb compaction.
	if wait {
		err = db.compTriggerWait(db.mcompCmdC)
	} else {
		db.compTrigger(db.mcompCmdC)
	}
	return
}
||||
|
|
||||
|
// flush ensures the effective memdb can hold at least n more bytes,
// rotating the memdb and/or throttling the writer while level-0 is
// congested. It returns the effective memdb (with a reference the
// caller must release) along with its free space.
func (db *DB) flush(n int) (mdb *memDB, mdbFree int, err error) {
	delayed := false
	slowdownTrigger := db.s.o.GetWriteL0SlowdownTrigger()
	pauseTrigger := db.s.o.GetWriteL0PauseTrigger()
	flush := func() (retry bool) {
		mdb = db.getEffectiveMem()
		if mdb == nil {
			err = ErrClosed
			return false
		}
		defer func() {
			// On retry the current ref is dropped; a fresh one is
			// taken at the top of the next iteration.
			if retry {
				mdb.decref()
				mdb = nil
			}
		}()
		tLen := db.s.tLen(0)
		mdbFree = mdb.Free()
		switch {
		case tLen >= slowdownTrigger && !delayed:
			// Level-0 filling up: slow the writer down once.
			delayed = true
			time.Sleep(time.Millisecond)
		case mdbFree >= n:
			// Enough room; done.
			return false
		case tLen >= pauseTrigger:
			// Level-0 full: block until table compaction catches up.
			delayed = true
			err = db.compTriggerWait(db.tcompCmdC)
			if err != nil {
				return false
			}
		default:
			// Allow memdb to grow if it has no entry.
			if mdb.Len() == 0 {
				mdbFree = n
			} else {
				mdb.decref()
				mdb, err = db.rotateMem(n, false)
				if err == nil {
					mdbFree = mdb.Free()
				} else {
					mdbFree = 0
				}
			}
			return false
		}
		return true
	}
	start := time.Now()
	for flush() {
	}
	// Accumulate write-delay statistics; report and reset once the
	// delays stop.
	if delayed {
		db.writeDelay += time.Since(start)
		db.writeDelayN++
	} else if db.writeDelayN > 0 {
		db.logf("db@write was delayed N·%d T·%v", db.writeDelayN, db.writeDelay)
		db.writeDelay = 0
		db.writeDelayN = 0
	}
	return
}
||||
|
|
||||
|
// writeMerge is the message a concurrent writer sends to the write-lock
// holder to request its write be merged; either batch or the
// (keyType, key, value) triple is populated.
type writeMerge struct {
	sync       bool
	batch      *Batch
	keyType    keyType
	key, value []byte
}

// unlockWrite acknowledges every merged writer with err, then either
// hands the write lock to the writer that overflowed the merge, or
// releases the lock entirely.
func (db *DB) unlockWrite(overflow bool, merged int, err error) {
	for i := 0; i < merged; i++ {
		db.writeAckC <- err
	}
	if overflow {
		// Pass lock to the next write (that failed to merge).
		db.writeMergedC <- false
	} else {
		// Release lock.
		<-db.writeLockC
	}
}
||||
|
|
||||
|
// writeLocked performs the actual write while holding the write lock:
// flush/rotate the memdb, optionally merge concurrently incoming
// writes, append to the journal, apply the batches to the memdb and
// advance the sequence number.
// ourBatch if defined should equal with batch.
func (db *DB) writeLocked(batch, ourBatch *Batch, merge, sync bool) error {
	// Try to flush memdb. This method would also trying to throttle writes
	// if it is too fast and compaction cannot catch-up.
	mdb, mdbFree, err := db.flush(batch.internalLen)
	if err != nil {
		db.unlockWrite(false, 0, err)
		return err
	}
	defer mdb.decref()

	var (
		overflow bool
		merged   int
		batches  = []*Batch{batch}
	)

	if merge {
		// Merge limit.
		var mergeLimit int
		if batch.internalLen > 128<<10 {
			mergeLimit = (1 << 20) - batch.internalLen
		} else {
			mergeLimit = 128 << 10
		}
		mergeCap := mdbFree - batch.internalLen
		if mergeLimit > mergeCap {
			mergeLimit = mergeCap
		}

	merge:
		for mergeLimit > 0 {
			select {
			case incoming := <-db.writeMergeC:
				if incoming.batch != nil {
					// Merge batch.
					if incoming.batch.internalLen > mergeLimit {
						overflow = true
						break merge
					}
					batches = append(batches, incoming.batch)
					mergeLimit -= incoming.batch.internalLen
				} else {
					// Merge put.
					internalLen := len(incoming.key) + len(incoming.value) + 8
					if internalLen > mergeLimit {
						overflow = true
						break merge
					}
					if ourBatch == nil {
						ourBatch = db.batchPool.Get().(*Batch)
						ourBatch.Reset()
						batches = append(batches, ourBatch)
					}
					// We can use same batch since concurrent write doesn't
					// guarantee write order.
					ourBatch.appendRec(incoming.keyType, incoming.key, incoming.value)
					mergeLimit -= internalLen
				}
				sync = sync || incoming.sync
				merged++
				// Acknowledge the writer that its write was merged.
				db.writeMergedC <- true

			default:
				break merge
			}
		}
	}

	// Seq number.
	seq := db.seq + 1

	// Write journal.
	if err := db.writeJournal(batches, seq, sync); err != nil {
		db.unlockWrite(overflow, merged, err)
		return err
	}

	// Put batches.
	for _, batch := range batches {
		// A failure here means journal and memdb diverged — unrecoverable.
		if err := batch.putMem(seq, mdb.DB); err != nil {
			panic(err)
		}
		seq += uint64(batch.Len())
	}

	// Incr seq number.
	db.addSeq(uint64(batchesLen(batches)))

	// Rotate memdb if it's reach the threshold.
	if batch.internalLen >= mdbFree {
		db.rotateMem(0, false)
	}

	db.unlockWrite(overflow, merged, nil)
	return nil
}
||||
|
|
||||
|
// Write apply the given batch to the DB. The batch records will be applied
// sequentially. Write might be used concurrently, when used concurrently and
// batch is small enough, write will try to merge the batches. Set NoWriteMerge
// option to true to disable write merge.
//
// It is safe to modify the contents of the arguments after Write returns but
// not before. Write will not modify content of the batch.
func (db *DB) Write(batch *Batch, wo *opt.WriteOptions) error {
	if err := db.ok(); err != nil || batch == nil || batch.Len() == 0 {
		return err
	}

	// If the batch size is larger than write buffer, it may justified to write
	// using transaction instead. Using transaction the batch will be written
	// into tables directly, skipping the journaling.
	if batch.internalLen > db.s.o.GetWriteBuffer() && !db.s.o.GetDisableLargeBatchTransaction() {
		tr, err := db.OpenTransaction()
		if err != nil {
			return err
		}
		if err := tr.Write(batch, wo); err != nil {
			tr.Discard()
			return err
		}
		return tr.Commit()
	}

	merge := !wo.GetNoWriteMerge() && !db.s.o.GetNoWriteMerge()
	sync := wo.GetSync() && !db.s.o.GetNoSync()

	// Acquire write lock.
	if merge {
		select {
		case db.writeMergeC <- writeMerge{sync: sync, batch: batch}:
			if <-db.writeMergedC {
				// Write is merged.
				return <-db.writeAckC
			}
			// Write is not merged, the write lock is handed to us. Continue.
		case db.writeLockC <- struct{}{}:
			// Write lock acquired.
		case err := <-db.compPerErrC:
			// Compaction error.
			return err
		case <-db.closeC:
			// Closed
			return ErrClosed
		}
	} else {
		select {
		case db.writeLockC <- struct{}{}:
			// Write lock acquired.
		case err := <-db.compPerErrC:
			// Compaction error.
			return err
		case <-db.closeC:
			// Closed
			return ErrClosed
		}
	}

	return db.writeLocked(batch, nil, merge, sync)
}
||||
|
|
||||
|
// putRec writes a single put/delete record through the same merge/lock
// protocol as Write, staging it in a pooled batch.
func (db *DB) putRec(kt keyType, key, value []byte, wo *opt.WriteOptions) error {
	if err := db.ok(); err != nil {
		return err
	}

	merge := !wo.GetNoWriteMerge() && !db.s.o.GetNoWriteMerge()
	sync := wo.GetSync() && !db.s.o.GetNoSync()

	// Acquire write lock.
	if merge {
		select {
		case db.writeMergeC <- writeMerge{sync: sync, keyType: kt, key: key, value: value}:
			if <-db.writeMergedC {
				// Write is merged.
				return <-db.writeAckC
			}
			// Write is not merged, the write lock is handed to us. Continue.
		case db.writeLockC <- struct{}{}:
			// Write lock acquired.
		case err := <-db.compPerErrC:
			// Compaction error.
			return err
		case <-db.closeC:
			// Closed
			return ErrClosed
		}
	} else {
		select {
		case db.writeLockC <- struct{}{}:
			// Write lock acquired.
		case err := <-db.compPerErrC:
			// Compaction error.
			return err
		case <-db.closeC:
			// Closed
			return ErrClosed
		}
	}

	batch := db.batchPool.Get().(*Batch)
	batch.Reset()
	batch.appendRec(kt, key, value)
	return db.writeLocked(batch, batch, merge, sync)
}
||||
|
|
||||
|
// Put sets the value for the given key. It overwrites any previous value
// for that key; a DB is not a multi-map. Write merge also applies for Put, see
// Write.
//
// It is safe to modify the contents of the arguments after Put returns but not
// before.
func (db *DB) Put(key, value []byte, wo *opt.WriteOptions) error {
	return db.putRec(keyTypeVal, key, value, wo)
}

// Delete deletes the value for the given key. Delete will not returns error if
// key doesn't exist. Write merge also applies for Delete, see Write.
//
// It is safe to modify the contents of the arguments after Delete returns but
// not before.
func (db *DB) Delete(key []byte, wo *opt.WriteOptions) error {
	return db.putRec(keyTypeDel, key, nil, wo)
}
||||
|
|
||||
|
func isMemOverlaps(icmp *iComparer, mem *memdb.DB, min, max []byte) bool { |
||||
|
iter := mem.NewIterator(nil) |
||||
|
defer iter.Release() |
||||
|
return (max == nil || (iter.First() && icmp.uCompare(max, internalKey(iter.Key()).ukey()) >= 0)) && |
||||
|
(min == nil || (iter.Last() && icmp.uCompare(min, internalKey(iter.Key()).ukey()) <= 0)) |
||||
|
} |
||||
|
|
||||
|
// CompactRange compacts the underlying DB for the given key range.
// In particular, deleted and overwritten versions are discarded,
// and the data is rearranged to reduce the cost of operations
// needed to access the data. This operation should typically only
// be invoked by users who understand the underlying implementation.
//
// A nil Range.Start is treated as a key before all keys in the DB.
// And a nil Range.Limit is treated as a key after all keys in the DB.
// Therefore if both is nil then it will compact entire DB.
func (db *DB) CompactRange(r util.Range) error {
	if err := db.ok(); err != nil {
		return err
	}

	// Lock writer.
	select {
	case db.writeLockC <- struct{}{}:
	case err := <-db.compPerErrC:
		// Persistent compaction error; give up.
		return err
	case <-db.closeC:
		return ErrClosed
	}

	// Check for overlaps in memdb.
	mdb := db.getEffectiveMem()
	if mdb == nil {
		return ErrClosed
	}
	defer mdb.decref()
	if isMemOverlaps(db.s.icmp, mdb.DB, r.Start, r.Limit) {
		// Memdb compaction: rotate the memdb out first — presumably so its
		// contents reach a table and are covered by the range compaction
		// below; confirm against rotateMem.
		if _, err := db.rotateMem(0, false); err != nil {
			// Release the write lock before bailing out.
			<-db.writeLockC
			return err
		}
		// Release the write lock before waiting, so writers are not
		// blocked while the memdb compaction runs.
		<-db.writeLockC
		if err := db.compTriggerWait(db.mcompCmdC); err != nil {
			return err
		}
	} else {
		// No overlap with the memdb; just release the write lock.
		<-db.writeLockC
	}

	// Table compaction.
	return db.compTriggerRange(db.tcompCmdC, -1, r.Start, r.Limit)
}
||||
|
|
||||
|
// SetReadOnly makes DB read-only. It will stay read-only until reopened.
func (db *DB) SetReadOnly() error {
	if err := db.ok(); err != nil {
		return err
	}

	// Lock writer. Note the write lock is deliberately not released in
	// this function; compWriteLocking records that fact — presumably so
	// teardown knows to release it. TODO confirm against Close.
	select {
	case db.writeLockC <- struct{}{}:
		db.compWriteLocking = true
	case err := <-db.compPerErrC:
		// Persistent compaction error takes precedence.
		return err
	case <-db.closeC:
		return ErrClosed
	}

	// Set compaction read-only: publish ErrReadOnly so subsequent writes
	// and compactions observe read-only mode.
	select {
	case db.compErrSetC <- ErrReadOnly:
	case perr := <-db.compPerErrC:
		return perr
	case <-db.closeC:
		return ErrClosed
	}

	return nil
}
@ -0,0 +1,90 @@ |
|||||
|
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
|
||||
|
// All rights reserved.
|
||||
|
//
|
||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||
|
// found in the LICENSE file.
|
||||
|
|
||||
|
// Package leveldb provides implementation of LevelDB key/value database.
|
||||
|
//
|
||||
|
// Create or open a database:
|
||||
|
//
|
||||
|
// db, err := leveldb.OpenFile("path/to/db", nil)
|
||||
|
// ...
|
||||
|
// defer db.Close()
|
||||
|
// ...
|
||||
|
//
|
||||
|
// Read or modify the database content:
|
||||
|
//
|
||||
|
// // Remember that the contents of the returned slice should not be modified.
|
||||
|
// data, err := db.Get([]byte("key"), nil)
|
||||
|
// ...
|
||||
|
// err = db.Put([]byte("key"), []byte("value"), nil)
|
||||
|
// ...
|
||||
|
// err = db.Delete([]byte("key"), nil)
|
||||
|
// ...
|
||||
|
//
|
||||
|
// Iterate over database content:
|
||||
|
//
|
||||
|
// iter := db.NewIterator(nil, nil)
|
||||
|
// for iter.Next() {
|
||||
|
// // Remember that the contents of the returned slice should not be modified, and
|
||||
|
// // only valid until the next call to Next.
|
||||
|
// key := iter.Key()
|
||||
|
// value := iter.Value()
|
||||
|
// ...
|
||||
|
// }
|
||||
|
// iter.Release()
|
||||
|
// err = iter.Error()
|
||||
|
// ...
|
||||
|
//
|
||||
|
// Iterate over subset of database content with a particular prefix:
|
||||
|
// iter := db.NewIterator(util.BytesPrefix([]byte("foo-")), nil)
|
||||
|
// for iter.Next() {
|
||||
|
// // Use key/value.
|
||||
|
// ...
|
||||
|
// }
|
||||
|
// iter.Release()
|
||||
|
// err = iter.Error()
|
||||
|
// ...
|
||||
|
//
|
||||
|
// Seek-then-Iterate:
|
||||
|
//
|
||||
|
// iter := db.NewIterator(nil, nil)
|
||||
|
// for ok := iter.Seek(key); ok; ok = iter.Next() {
|
||||
|
// // Use key/value.
|
||||
|
// ...
|
||||
|
// }
|
||||
|
// iter.Release()
|
||||
|
// err = iter.Error()
|
||||
|
// ...
|
||||
|
//
|
||||
|
// Iterate over subset of database content:
|
||||
|
//
|
||||
|
// iter := db.NewIterator(&util.Range{Start: []byte("foo"), Limit: []byte("xoo")}, nil)
|
||||
|
// for iter.Next() {
|
||||
|
// // Use key/value.
|
||||
|
// ...
|
||||
|
// }
|
||||
|
// iter.Release()
|
||||
|
// err = iter.Error()
|
||||
|
// ...
|
||||
|
//
|
||||
|
// Batch writes:
|
||||
|
//
|
||||
|
// batch := new(leveldb.Batch)
|
||||
|
// batch.Put([]byte("foo"), []byte("value"))
|
||||
|
// batch.Put([]byte("bar"), []byte("another value"))
|
||||
|
// batch.Delete([]byte("baz"))
|
||||
|
// err = db.Write(batch, nil)
|
||||
|
// ...
|
||||
|
//
|
||||
|
// Use bloom filter:
|
||||
|
//
|
||||
|
// o := &opt.Options{
|
||||
|
// Filter: filter.NewBloomFilter(10),
|
||||
|
// }
|
||||
|
// db, err := leveldb.OpenFile("path/to/db", o)
|
||||
|
// ...
|
||||
|
// defer db.Close()
|
||||
|
// ...
|
||||
|
package leveldb |
@ -0,0 +1,20 @@ |
|||||
|
// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
|
||||
|
// All rights reserved.
|
||||
|
//
|
||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||
|
// found in the LICENSE file.
|
||||
|
|
||||
|
package leveldb |
||||
|
|
||||
|
import ( |
||||
|
"github.com/syndtr/goleveldb/leveldb/errors" |
||||
|
) |
||||
|
|
||||
|
// Common errors.
var (
	// ErrNotFound is returned when a requested key does not exist;
	// aliased from the errors package so both compare equal.
	ErrNotFound = errors.ErrNotFound
	// ErrReadOnly is reported while the DB is in read-only mode
	// (see SetReadOnly).
	ErrReadOnly = errors.New("leveldb: read-only mode")
	// ErrSnapshotReleased is reported when using a released snapshot.
	ErrSnapshotReleased = errors.New("leveldb: snapshot released")
	// ErrIterReleased is reported when using a released iterator.
	ErrIterReleased = errors.New("leveldb: iterator released")
	// ErrClosed is reported when operating on a closed DB.
	ErrClosed = errors.New("leveldb: closed")
)
@ -0,0 +1,78 @@ |
|||||
|
// Copyright (c) 2014, Suryandaru Triandana <syndtr@gmail.com>
|
||||
|
// All rights reserved.
|
||||
|
//
|
||||
|
// Use of this source code is governed by a BSD-style license that can be
|
||||
|
// found in the LICENSE file.
|
||||
|
|
||||
|
// Package errors provides common error types used throughout leveldb.
|
||||
|
package errors |
||||
|
|
||||
|
import ( |
||||
|
"errors" |
||||
|
"fmt" |
||||
|
|
||||
|
"github.com/syndtr/goleveldb/leveldb/storage" |
||||
|
"github.com/syndtr/goleveldb/leveldb/util" |
||||
|
) |
||||
|
|
||||
|
// Common errors.
var (
	// ErrNotFound is returned when a requested entry does not exist.
	ErrNotFound = New("leveldb: not found")
	// ErrReleased and ErrHasReleaser are aliased from the util package
	// so both packages report identical values.
	ErrReleased    = util.ErrReleased
	ErrHasReleaser = util.ErrHasReleaser
)
||||
|
|
||||
|
// New returns an error whose message is the given text, mirroring the
// standard library's errors.New.
func New(text string) error {
	err := errors.New(text)
	return err
}
||||
|
|
||||
|
// ErrCorrupted is the type that wraps errors that indicate corruption in
// the database.
type ErrCorrupted struct {
	Fd  storage.FileDesc // descriptor of the corrupted file; may be zero when unknown
	Err error            // the underlying corruption error
}
||||
|
|
||||
|
func (e *ErrCorrupted) Error() string { |
||||
|
if !e.Fd.Zero() { |
||||
|
return fmt.Sprintf("%v [file=%v]", e.Err, e.Fd) |
||||
|
} |
||||
|
return e.Err.Error() |
||||
|
} |
||||
|
|
||||
|
// NewErrCorrupted creates new ErrCorrupted error.
|
||||
|
func NewErrCorrupted(fd storage.FileDesc, err error) error { |
||||
|
return &ErrCorrupted{fd, err} |
||||
|
} |
||||
|
|
||||
|
// IsCorrupted returns a boolean indicating whether the error is indicating
|
||||
|
// a corruption.
|
||||
|
func IsCorrupted(err error) bool { |
||||
|
switch err.(type) { |
||||
|
case *ErrCorrupted: |
||||
|
return true |
||||
|
case *storage.ErrCorrupted: |
||||
|
return true |
||||
|
} |
||||
|
return false |
||||
|
} |
||||
|
|
||||
|
// ErrMissingFiles indicates a corruption caused by missing files.
// ErrMissingFiles is always wrapped with ErrCorrupted.
type ErrMissingFiles struct {
	Fds []storage.FileDesc // descriptors of the files that are missing
}

// Error implements the error interface.
func (e *ErrMissingFiles) Error() string { return "file missing" }
||||
|
|
||||
|
// SetFd sets 'file info' of the given error with the given file.
|
||||
|
// Currently only ErrCorrupted is supported, otherwise will do nothing.
|
||||
|
func SetFd(err error, fd storage.FileDesc) error { |
||||
|
switch x := err.(type) { |
||||
|
case *ErrCorrupted: |
||||
|
x.Fd = fd |
||||
|
return x |
||||
|
} |
||||
|
return err |
||||
|
} |
Some files were not shown because too many files changed in this diff
Write
Preview
Loading…
Cancel
Save
Reference in new issue