Commit 27088ac5 authored by manish's avatar manish
Browse files

Replace RocksDB by goleveldb

https://jira.hyperledger.org/browse/FAB-788
RocksDB carries a patent grant from Facebook with termination clauses
(https://github.com/facebook/rocksdb/blob/master/PATENTS)
Many users may not be comfortable with the terms of this license.

The alternatives include
1. LevelDB (https://github.com/google/leveldb) with a go wrapper (https://github.com/jmhodges/levigo),
2. goleveldb (https://github.com/syndtr/goleveldb) - a port of leveldb to golang
3. BoltDB (https://github.com/boltdb/bolt)

BoltDB is suitable for read-heavy workloads (e.g., LDAP)
but has relatively poor performance for read-write workloads.
Of the other two options, goleveldb is chosen because it is
implemented in golang and hence easy to integrate and maintain.
In addition, as a precedent, the ethereum go implementation also
uses this package:
https://github.com/ethereum/go-ethereum/blob/master/ethdb/database.go



Change-Id: Ia4fb5a6f9299e613d03d8b414a51bf479bfafd59
Signed-off-by: default avatarmanish <manish.sethi@gmail.com>
parent 8c9dcc98
......@@ -27,7 +27,6 @@ import (
"github.com/hyperledger/fabric/core/ledger/util/db"
"github.com/hyperledger/fabric/protos"
"github.com/op/go-logging"
"github.com/tecbot/gorocksdb"
)
var logger = logging.MustGetLogger("kvledger")
......@@ -45,7 +44,6 @@ type blockfileMgr struct {
rootDir string
conf *Conf
db *db.DB
defaultCF *gorocksdb.ColumnFamilyHandle
index index
cpInfo *checkpointInfo
cpInfoCond *sync.Cond
......@@ -60,7 +58,7 @@ func newBlockfileMgr(conf *Conf, indexConfig *blkstorage.IndexConfig) *blockfile
panic(fmt.Sprintf("Error: %s", err))
}
db := initDB(conf)
mgr := &blockfileMgr{rootDir: rootDir, conf: conf, db: db, defaultCF: db.GetDefaultCFHandle()}
mgr := &blockfileMgr{rootDir: rootDir, conf: conf, db: db}
cpInfo, err := mgr.loadCurrentInfo()
if err != nil {
panic(fmt.Sprintf("Could not get block file info for current block file from db: %s", err))
......@@ -82,7 +80,7 @@ func newBlockfileMgr(conf *Conf, indexConfig *blkstorage.IndexConfig) *blockfile
panic(fmt.Sprintf("Could not truncate current file to known size in db: %s", err))
}
mgr.index = newBlockIndex(indexConfig, db, db.GetCFHandle(blockIndexCF))
mgr.index = newBlockIndex(indexConfig, db)
mgr.cpInfo = cpInfo
mgr.currentFileWriter = currentFileWriter
mgr.cpInfoCond = sync.NewCond(&sync.Mutex{})
......@@ -115,10 +113,7 @@ func newBlockfileMgr(conf *Conf, indexConfig *blkstorage.IndexConfig) *blockfile
func initDB(conf *Conf) *db.DB {
dbInst := db.CreateDB(&db.Conf{
DBPath: conf.dbPath,
CFNames: []string{blockIndexCF},
DisableWAL: true})
DBPath: conf.dbPath})
dbInst.Open()
return dbInst
}
......@@ -421,7 +416,7 @@ func (mgr *blockfileMgr) fetchRawBytes(lp *fileLocPointer) ([]byte, error) {
func (mgr *blockfileMgr) loadCurrentInfo() (*checkpointInfo, error) {
var b []byte
var err error
if b, err = mgr.db.Get(mgr.defaultCF, blkMgrInfoKey); b == nil || err != nil {
if b, err = mgr.db.Get(blkMgrInfoKey); b == nil || err != nil {
return nil, err
}
i := &checkpointInfo{}
......@@ -432,20 +427,14 @@ func (mgr *blockfileMgr) loadCurrentInfo() (*checkpointInfo, error) {
return i, nil
}
func (mgr *blockfileMgr) saveCurrentInfo(i *checkpointInfo, flush bool) error {
func (mgr *blockfileMgr) saveCurrentInfo(i *checkpointInfo, sync bool) error {
b, err := i.marshal()
if err != nil {
return err
}
if err = mgr.db.Put(mgr.defaultCF, blkMgrInfoKey, b); err != nil {
if err = mgr.db.Put(blkMgrInfoKey, b, sync); err != nil {
return err
}
if flush {
if err = mgr.db.Flush(true); err != nil {
return err
}
logger.Debugf("saved checkpointInfo:%s", i)
}
return nil
}
......
......@@ -23,7 +23,7 @@ import (
"github.com/hyperledger/fabric/core/ledger/blkstorage"
"github.com/hyperledger/fabric/core/ledger/util"
"github.com/hyperledger/fabric/core/ledger/util/db"
"github.com/tecbot/gorocksdb"
"github.com/syndtr/goleveldb/leveldb"
)
const (
......@@ -53,24 +53,22 @@ type blockIdxInfo struct {
type blockIndex struct {
indexItemsMap map[blkstorage.IndexableAttr]bool
db *db.DB
blockIndexCF *gorocksdb.ColumnFamilyHandle
}
func newBlockIndex(indexConfig *blkstorage.IndexConfig, db *db.DB,
indexCFHandle *gorocksdb.ColumnFamilyHandle) *blockIndex {
func newBlockIndex(indexConfig *blkstorage.IndexConfig, db *db.DB) *blockIndex {
indexItems := indexConfig.AttrsToIndex
logger.Debugf("newBlockIndex() - indexItems:[%s]", indexItems)
indexItemsMap := make(map[blkstorage.IndexableAttr]bool)
for _, indexItem := range indexItems {
indexItemsMap[indexItem] = true
}
return &blockIndex{indexItemsMap, db, indexCFHandle}
return &blockIndex{indexItemsMap, db}
}
func (index *blockIndex) getLastBlockIndexed() (uint64, error) {
var blockNumBytes []byte
var err error
if blockNumBytes, err = index.db.Get(index.blockIndexCF, indexCheckpointKey); err != nil {
if blockNumBytes, err = index.db.Get(indexCheckpointKey); err != nil {
return 0, nil
}
return decodeBlockNum(blockNumBytes), nil
......@@ -85,19 +83,18 @@ func (index *blockIndex) indexBlock(blockIdxInfo *blockIdxInfo) error {
logger.Debugf("Indexing block [%s]", blockIdxInfo)
flp := blockIdxInfo.flp
txOffsets := blockIdxInfo.txOffsets
batch := gorocksdb.NewWriteBatch()
defer batch.Destroy()
batch := &leveldb.Batch{}
flpBytes, err := flp.marshal()
if err != nil {
return err
}
if _, ok := index.indexItemsMap[blkstorage.IndexableAttrBlockHash]; ok {
batch.PutCF(index.blockIndexCF, constructBlockHashKey(blockIdxInfo.blockHash), flpBytes)
batch.Put(constructBlockHashKey(blockIdxInfo.blockHash), flpBytes)
}
if _, ok := index.indexItemsMap[blkstorage.IndexableAttrBlockNum]; ok {
batch.PutCF(index.blockIndexCF, constructBlockNumKey(blockIdxInfo.blockNum), flpBytes)
batch.Put(constructBlockNumKey(blockIdxInfo.blockNum), flpBytes)
}
if _, ok := index.indexItemsMap[blkstorage.IndexableAttrTxID]; ok {
......@@ -110,12 +107,12 @@ func (index *blockIndex) indexBlock(blockIdxInfo *blockIdxInfo) error {
if marshalErr != nil {
return marshalErr
}
batch.PutCF(index.blockIndexCF, constructTxIDKey(txID), txFlpBytes)
batch.Put(constructTxIDKey(txID), txFlpBytes)
}
}
batch.PutCF(index.blockIndexCF, indexCheckpointKey, encodeBlockNum(blockIdxInfo.blockNum))
if err := index.db.WriteBatch(batch); err != nil {
batch.Put(indexCheckpointKey, encodeBlockNum(blockIdxInfo.blockNum))
if err := index.db.WriteBatch(batch, false); err != nil {
return err
}
return nil
......@@ -125,7 +122,7 @@ func (index *blockIndex) getBlockLocByHash(blockHash []byte) (*fileLocPointer, e
if _, ok := index.indexItemsMap[blkstorage.IndexableAttrBlockHash]; !ok {
return nil, blkstorage.ErrAttrNotIndexed
}
b, err := index.db.Get(index.blockIndexCF, constructBlockHashKey(blockHash))
b, err := index.db.Get(constructBlockHashKey(blockHash))
if err != nil {
return nil, err
}
......@@ -141,7 +138,7 @@ func (index *blockIndex) getBlockLocByBlockNum(blockNum uint64) (*fileLocPointer
if _, ok := index.indexItemsMap[blkstorage.IndexableAttrBlockNum]; !ok {
return nil, blkstorage.ErrAttrNotIndexed
}
b, err := index.db.Get(index.blockIndexCF, constructBlockNumKey(blockNum))
b, err := index.db.Get(constructBlockNumKey(blockNum))
if err != nil {
return nil, err
}
......@@ -157,7 +154,7 @@ func (index *blockIndex) getTxLoc(txID string) (*fileLocPointer, error) {
if _, ok := index.indexItemsMap[blkstorage.IndexableAttrTxID]; !ok {
return nil, blkstorage.ErrAttrNotIndexed
}
b, err := index.db.Get(index.blockIndexCF, constructTxIDKey(txID))
b, err := index.db.Get(constructTxIDKey(txID))
if err != nil {
return nil, err
}
......
......@@ -28,7 +28,6 @@ import (
"github.com/hyperledger/fabric/protos"
putils "github.com/hyperledger/fabric/protos/utils"
"github.com/op/go-logging"
"github.com/tecbot/gorocksdb"
)
var logger = logging.MustGetLogger("couchdbtxmgmt")
......@@ -68,7 +67,6 @@ func (u *updateSet) get(compositeKey []byte) *versionedValue {
// This implementation uses a read-write lock to prevent conflicts between transaction simulation and committing
type CouchDBTxMgr struct {
db *db.DB
stateIndexCF *gorocksdb.ColumnFamilyHandle
updateSet *updateSet
commitRWLock sync.RWMutex
couchDB *couchdb.CouchDBConnectionDef // COUCHDB new properties for CouchDB
......@@ -87,7 +85,7 @@ type CouchConnection struct {
func NewCouchDBTxMgr(conf *Conf, host string, port int, dbName string, id string, pw string) *CouchDBTxMgr {
// TODO cleanup this RocksDB handle
db := db.CreateDB(&db.Conf{DBPath: conf.DBPath, CFNames: []string{}})
db := db.CreateDB(&db.Conf{DBPath: conf.DBPath})
db.Open()
couchDB, err := couchdb.CreateConnectionDefinition(host,
......@@ -106,7 +104,7 @@ func NewCouchDBTxMgr(conf *Conf, host string, port int, dbName string, id string
}
// db and stateIndexCF will not be used for CouchDB. TODO to cleanup
return &CouchDBTxMgr{db: db, stateIndexCF: db.GetDefaultCFHandle(), couchDB: couchDB}
return &CouchDBTxMgr{db: db, couchDB: couchDB}
}
// NewQueryExecutor implements method in interface `txmgmt.TxMgr`
......
......@@ -27,7 +27,7 @@ import (
"github.com/hyperledger/fabric/protos"
putils "github.com/hyperledger/fabric/protos/utils"
"github.com/op/go-logging"
"github.com/tecbot/gorocksdb"
"github.com/syndtr/goleveldb/leveldb"
)
var logger = logging.MustGetLogger("lockbasedtxmgmt")
......@@ -67,16 +67,15 @@ func (u *updateSet) get(compositeKey []byte) *versionedValue {
// This implementation uses a read-write lock to prevent conflicts between transaction simulation and committing
type LockBasedTxMgr struct {
db *db.DB
stateIndexCF *gorocksdb.ColumnFamilyHandle
updateSet *updateSet
commitRWLock sync.RWMutex
}
// NewLockBasedTxMgr constructs a `LockBasedTxMgr`
func NewLockBasedTxMgr(conf *Conf) *LockBasedTxMgr {
db := db.CreateDB(&db.Conf{DBPath: conf.DBPath, CFNames: []string{}})
db := db.CreateDB(&db.Conf{DBPath: conf.DBPath})
db.Open()
return &LockBasedTxMgr{db: db, stateIndexCF: db.GetDefaultCFHandle()}
return &LockBasedTxMgr{db: db}
}
// NewQueryExecutor implements method in interface `txmgmt.TxMgr`
......@@ -226,18 +225,17 @@ func (txmgr *LockBasedTxMgr) addWriteSetToBatch(txRWSet *txmgmt.TxReadWriteSet)
// Commit implements method in interface `txmgmt.TxMgr`
func (txmgr *LockBasedTxMgr) Commit() error {
batch := gorocksdb.NewWriteBatch()
batch := &leveldb.Batch{}
if txmgr.updateSet == nil {
panic("validateAndPrepare() method should have been called before calling commit()")
}
for k, v := range txmgr.updateSet.m {
batch.PutCF(txmgr.stateIndexCF, []byte(k), encodeValue(v.value, v.version))
batch.Put([]byte(k), encodeValue(v.value, v.version))
}
txmgr.commitRWLock.Lock()
defer txmgr.commitRWLock.Unlock()
defer func() { txmgr.updateSet = nil }()
defer batch.Destroy()
if err := txmgr.db.WriteBatch(batch); err != nil {
if err := txmgr.db.WriteBatch(batch, false); err != nil {
return err
}
return nil
......@@ -261,7 +259,7 @@ func (txmgr *LockBasedTxMgr) getCommittedValueAndVersion(ns string, key string)
compositeKey := constructCompositeKey(ns, key)
var encodedValue []byte
var err error
if encodedValue, err = txmgr.db.Get(txmgr.stateIndexCF, compositeKey); err != nil {
if encodedValue, err = txmgr.db.Get(compositeKey); err != nil {
return nil, 0, err
}
if encodedValue == nil {
......
......@@ -22,7 +22,8 @@ import (
"github.com/hyperledger/fabric/core/ledger/util"
"github.com/op/go-logging"
"github.com/tecbot/gorocksdb"
"github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/opt"
)
var logger = logging.MustGetLogger("kvledger.db")
......@@ -30,92 +31,71 @@ var logger = logging.MustGetLogger("kvledger.db")
type dbState int32
const (
defaultCFName = "default"
closed dbState = iota
closed dbState = iota
opened
)
// Conf configuration for `DB`
type Conf struct {
DBPath string
CFNames []string
DisableWAL bool
DBPath string
}
// DB - a rocksDB instance
// DB - a wrapper on an actual store
type DB struct {
conf *Conf
rocksDB *gorocksdb.DB
cfHandlesMap map[string]*gorocksdb.ColumnFamilyHandle
dbState dbState
mux sync.Mutex
readOpts *gorocksdb.ReadOptions
writeOpts *gorocksdb.WriteOptions
conf *Conf
db *leveldb.DB
dbState dbState
mux sync.Mutex
readOpts *opt.ReadOptions
writeOptsNoSync *opt.WriteOptions
writeOptsSync *opt.WriteOptions
}
// CreateDB constructs a `DB`
func CreateDB(conf *Conf) *DB {
conf.CFNames = append(conf.CFNames, defaultCFName)
readOpts := gorocksdb.NewDefaultReadOptions()
writeOpts := gorocksdb.NewDefaultWriteOptions()
writeOpts.DisableWAL(conf.DisableWAL)
readOpts := &opt.ReadOptions{}
writeOptsNoSync := &opt.WriteOptions{}
writeOptsSync := &opt.WriteOptions{}
writeOptsSync.Sync = true
return &DB{
conf: conf,
cfHandlesMap: make(map[string]*gorocksdb.ColumnFamilyHandle),
dbState: closed,
readOpts: readOpts,
writeOpts: writeOpts}
conf: conf,
dbState: closed,
readOpts: readOpts,
writeOptsNoSync: writeOptsNoSync,
writeOptsSync: writeOptsSync}
}
// Open open underlying rocksdb
// Open opens the underlying db
func (dbInst *DB) Open() {
dbInst.mux.Lock()
defer dbInst.mux.Unlock()
if dbInst.dbState == opened {
dbInst.mux.Unlock()
return
}
defer dbInst.mux.Unlock()
dbOpts := &opt.Options{}
dbPath := dbInst.conf.DBPath
dirEmpty, err := util.CreateDirIfMissing(dbPath)
if err != nil {
var err error
var dirEmpty bool
if dirEmpty, err = util.CreateDirIfMissing(dbPath); err != nil {
panic(fmt.Sprintf("Error while trying to open DB: %s", err))
}
opts := gorocksdb.NewDefaultOptions()
defer opts.Destroy()
opts.SetCreateIfMissing(dirEmpty)
opts.SetCreateIfMissingColumnFamilies(true)
var cfOpts []*gorocksdb.Options
for range dbInst.conf.CFNames {
cfOpts = append(cfOpts, opts)
}
db, cfHandlers, err := gorocksdb.OpenDbColumnFamilies(opts, dbPath, dbInst.conf.CFNames, cfOpts)
if err != nil {
panic(fmt.Sprintf("Error opening DB: %s", err))
}
dbInst.rocksDB = db
for i := 0; i < len(dbInst.conf.CFNames); i++ {
dbInst.cfHandlesMap[dbInst.conf.CFNames[i]] = cfHandlers[i]
dbOpts.ErrorIfMissing = !dirEmpty
if dbInst.db, err = leveldb.OpenFile(dbPath, dbOpts); err != nil {
panic(fmt.Sprintf("Error while trying to open DB: %s", err))
}
dbInst.dbState = opened
}
// Close releases all column family handles and closes rocksdb
// Close closes the underlying db
func (dbInst *DB) Close() {
dbInst.mux.Lock()
defer dbInst.mux.Unlock()
if dbInst.dbState == closed {
dbInst.mux.Unlock()
return
}
defer dbInst.mux.Unlock()
for _, cfHandler := range dbInst.cfHandlesMap {
cfHandler.Destroy()
}
dbInst.rocksDB.Close()
dbInst.db.Close()
dbInst.dbState = closed
}
......@@ -125,74 +105,55 @@ func (dbInst *DB) isOpen() bool {
return dbInst.dbState == opened
}
// Get returns the value for the given column family and key
func (dbInst *DB) Get(cfHandle *gorocksdb.ColumnFamilyHandle, key []byte) ([]byte, error) {
slice, err := dbInst.rocksDB.GetCF(dbInst.readOpts, cfHandle, key)
// Get returns the value for the given key
func (dbInst *DB) Get(key []byte) ([]byte, error) {
value, err := dbInst.db.Get(key, dbInst.readOpts)
if err == leveldb.ErrNotFound {
err = nil
}
if err != nil {
fmt.Println("Error while trying to retrieve key:", key)
logger.Errorf("Error while trying to retrieve key [%#v]: %s", key, err)
return nil, err
}
defer slice.Free()
if slice.Data() == nil {
return nil, nil
}
data := makeCopy(slice.Data())
return data, nil
return value, nil
}
// Put saves the key/value in the given column family
func (dbInst *DB) Put(cfHandle *gorocksdb.ColumnFamilyHandle, key []byte, value []byte) error {
err := dbInst.rocksDB.PutCF(dbInst.writeOpts, cfHandle, key, value)
// Put saves the key/value
func (dbInst *DB) Put(key []byte, value []byte, sync bool) error {
wo := dbInst.writeOptsNoSync
if sync {
wo = dbInst.writeOptsSync
}
err := dbInst.db.Put(key, value, wo)
if err != nil {
fmt.Println("Error while trying to write key:", key)
logger.Errorf("Error while trying to write key [%#v]", key)
return err
}
return nil
}
// Delete delets the given key in the specified column family
func (dbInst *DB) Delete(cfHandle *gorocksdb.ColumnFamilyHandle, key []byte) error {
err := dbInst.rocksDB.DeleteCF(dbInst.writeOpts, cfHandle, key)
// Delete deletes the given key
func (dbInst *DB) Delete(key []byte, sync bool) error {
wo := dbInst.writeOptsNoSync
if sync {
wo = dbInst.writeOptsSync
}
err := dbInst.db.Delete(key, wo)
if err != nil {
fmt.Println("Error while trying to delete key:", key)
logger.Errorf("Error while trying to delete key [%#v]", key)
return err
}
return nil
}
// WriteBatch writes a batch
func (dbInst *DB) WriteBatch(batch *gorocksdb.WriteBatch) error {
if err := dbInst.rocksDB.Write(dbInst.writeOpts, batch); err != nil {
func (dbInst *DB) WriteBatch(batch *leveldb.Batch, sync bool) error {
wo := dbInst.writeOptsNoSync
if sync {
wo = dbInst.writeOptsSync
}
if err := dbInst.db.Write(batch, wo); err != nil {
return err
}
return nil
}
// GetIterator returns an iterator for the given column family
func (dbInst *DB) GetIterator(cfName string) *gorocksdb.Iterator {
return dbInst.rocksDB.NewIteratorCF(dbInst.readOpts, dbInst.GetCFHandle(cfName))
}
// GetCFHandle returns handle to a named column family
func (dbInst *DB) GetCFHandle(cfName string) *gorocksdb.ColumnFamilyHandle {
return dbInst.cfHandlesMap[cfName]
}
// GetDefaultCFHandle returns handle to default column family
func (dbInst *DB) GetDefaultCFHandle() *gorocksdb.ColumnFamilyHandle {
return dbInst.GetCFHandle(defaultCFName)
}
// Flush flushes rocksDB memory to sst files
func (dbInst *DB) Flush(wait bool) error {
flushOpts := gorocksdb.NewDefaultFlushOptions()
defer flushOpts.Destroy()
flushOpts.SetWait(wait)
return dbInst.rocksDB.Flush(flushOpts)
}
func makeCopy(src []byte) []byte {
dest := make([]byte, len(src))
copy(dest, src)
return dest
}
......@@ -17,28 +17,34 @@ limitations under the License.
package db
import (
"os"
"testing"
"github.com/hyperledger/fabric/core/ledger/testutil"
)
func TestDBBasicWriteAndReads(t *testing.T) {
dbConf := &Conf{"/tmp/v2/test/db", []string{"cf1", "cf2"}, false}
testDBPath := "/tmp/test/hyperledger/fabric/core/ledger/util/db"
if err := os.RemoveAll(testDBPath); err != nil {
t.Fatalf("Error:%s", err)
}
dbConf := &Conf{testDBPath}
defer func() { os.RemoveAll(testDBPath) }()
db := CreateDB(dbConf)
db.Open()
defer db.Close()
db.Put(db.GetCFHandle("cf1"), []byte("key1"), []byte("value1"))
db.Put(db.GetCFHandle("cf2"), []byte("key2"), []byte("value2"))
db.Put(db.GetDefaultCFHandle(), []byte("key3"), []byte("value3"))
val, err := db.Get(db.GetCFHandle("cf1"), []byte("key1"))
db.Put([]byte("key1"), []byte("value1"), false)
db.Put([]byte("key2"), []byte("value2"), false)
db.Put([]byte("key3"), []byte("value3"), false)
val, err := db.Get([]byte("key1"))
testutil.AssertNoError(t, err, "")
testutil.AssertEquals(t, val, []byte("value1"))
val, err = db.Get(db.GetCFHandle("cf2"), []byte("key2"))
val, err = db.Get([]byte("key2"))
testutil.AssertNoError(t, err, "")
testutil.AssertEquals(t, val, []byte("value2"))
val, err = db.Get(db.GetDefaultCFHandle(), []byte("key3"))
val, err = db.Get([]byte("key3"))
testutil.AssertNoError(t, err, "")
testutil.AssertEquals(t, val, []byte("value3"))
}
Copyright 2012 Suryandaru Triandana <syndtr@gmail.com>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package leveldb
import (
"encoding/binary"
"fmt"