Browse Source

maintain the unmaintained

pull/1427/head
Chris Lu 4 years ago
parent
commit
332caf0cd7
  1. 6
      unmaintained/diff_volume_servers/diff_volume_servers.go
  2. 4
      unmaintained/fix_dat/fix_dat.go
  3. 83
      unmaintained/see_dat/see_dat_gzip.go
  4. 2
      unmaintained/see_idx/see_idx.go

6
unmaintained/diff_volume_servers/diff_volume_servers.go

@@ -118,7 +118,7 @@ const (
 type needleState struct {
 	state uint8
-	size  uint32
+	size  types.Size
 }

 func getVolumeFiles(v uint32, addr string) (map[types.NeedleId]needleState, int64, error) {
@@ -154,8 +154,8 @@ func getVolumeFiles(v uint32, addr string) (map[types.NeedleId]needleState, int64, error) {
 	var maxOffset int64
 	files := map[types.NeedleId]needleState{}
-	err = idx.WalkIndexFile(idxFile, func(key types.NeedleId, offset types.Offset, size Size) error {
-		if offset.IsZero() || size == types.TombstoneFileSize {
+	err = idx.WalkIndexFile(idxFile, func(key types.NeedleId, offset types.Offset, size types.Size) error {
+		if offset.IsZero() || size < 0 || size == types.TombstoneFileSize {
 			files[key] = needleState{
 				state: stateDeleted,
 				size:  size,

4
unmaintained/fix_dat/fix_dat.go

@@ -98,7 +98,7 @@ func iterateEntries(datBackend backend.BackendStorageFile, idxFile *os.File, vis
 		// parse index file entry
 		key := util.BytesToUint64(bytes[0:8])
 		offsetFromIndex := util.BytesToUint32(bytes[8:12])
-		sizeFromIndex := util.BytesToUint32(bytes[12:16])
+		sizeFromIndex := types.BytesToSize(bytes[12:16])
 		count, _ = idxFile.ReadAt(bytes, readerOffset)
 		readerOffset += int64(count)
@@ -123,7 +123,7 @@ func iterateEntries(datBackend backend.BackendStorageFile, idxFile *os.File, vis
 			}
 		}()
-		if n.Size <= n.DataSize {
+		if n.Size <= types.Size(n.DataSize) {
 			continue
 		}
 		visitNeedle(n, offset)

83
unmaintained/see_dat/see_dat_gzip.go

@@ -1,83 +0,0 @@
package main
import (
"bytes"
"compress/gzip"
"crypto/md5"
"flag"
"io"
"io/ioutil"
"net/http"
"time"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/storage"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
"github.com/chrislusf/seaweedfs/weed/storage/super_block"
"github.com/chrislusf/seaweedfs/weed/util"
)
type VolumeFileScanner4SeeDat struct {
version needle.Version
}
func (scanner *VolumeFileScanner4SeeDat) VisitSuperBlock(superBlock super_block.SuperBlock) error {
scanner.version = superBlock.Version
return nil
}
func (scanner *VolumeFileScanner4SeeDat) ReadNeedleBody() bool {
return true
}
var (
files = int64(0)
filebytes = int64(0)
diffbytes = int64(0)
)
func Compresssion(data []byte) float64 {
if len(data) <= 128 {
return 100.0
}
compressed, _ := util.GzipData(data[0:128])
return float64(len(compressed)*10) / 1280.0
}
func (scanner *VolumeFileScanner4SeeDat) VisitNeedle(n *needle.Needle, offset int64, needleHeader, needleBody []byte) error {
t := time.Unix(int64(n.AppendAtNs)/int64(time.Second), int64(n.AppendAtNs)%int64(time.Second))
glog.V(0).Info("----------------------------------------------------------------------------------")
glog.V(0).Infof("%d,%s%x offset %d size %d(%s) cookie %x appendedAt %v hasmime[%t] mime[%s] (len: %d)",
*volumeId, n.Id, n.Cookie, offset, n.Size, util.BytesToHumanReadable(uint64(n.Size)), n.Cookie, t, n.HasMime(), string(n.Mime), len(n.Mime))
r, err := gzip.NewReader(bytes.NewReader(n.Data))
if err == nil {
buf := bytes.Buffer{}
h := md5.New()
c, _ := io.Copy(&buf, r)
d := buf.Bytes()
io.Copy(h, bytes.NewReader(d))
diff := (int64(n.DataSize) - int64(c))
diffbytes += diff
glog.V(0).Infof("was gzip! stored_size: %d orig_size: %d diff: %d(%d) mime:%s compression-of-128: %.2f md5: %x", n.DataSize, c, diff, diffbytes, http.DetectContentType(d), Compresssion(d), h.Sum(nil))
} else {
glog.V(0).Infof("no gzip!")
}
return nil
}
var (
_ = ioutil.ReadAll
volumePath = flag.String("dir", "/tmp", "data directory to store files")
volumeCollection = flag.String("collection", "", "the volume collection name")
volumeId = flag.Int("volumeId", -1, "a volume id. The volume should already exist in the dir. The volume index file should not exist.")
)
func main() {
flag.Parse()
vid := needle.VolumeId(*volumeId)
glog.V(0).Info("Starting")
scanner := &VolumeFileScanner4SeeDat{}
err := storage.ScanVolumeFile(*volumePath, *volumeCollection, vid, storage.NeedleMapInMemory, scanner)
if err != nil {
glog.Fatalf("Reading Volume File [ERROR] %s\n", err)
}
}

2
unmaintained/see_idx/see_idx.go

@@ -36,7 +36,7 @@ func main() {
 	}
 	defer indexFile.Close()
-	idx.WalkIndexFile(indexFile, func(key types.NeedleId, offset types.Offset, size Size) error {
+	idx.WalkIndexFile(indexFile, func(key types.NeedleId, offset types.Offset, size types.Size) error {
 		fmt.Printf("key:%v offset:%v size:%v(%v)\n", key, offset, size, util.BytesToHumanReadable(uint64(size)))
 		return nil
 	})

Loading…
Cancel
Save