
Merge pull request #10 from chrislusf/master

sync
hilimd committed 4 years ago (via GitHub)
commit 6769d07604
18 changed files (lines changed in parentheses):

 1. other/java/client/pom.xml (2)
 2. other/java/client/pom.xml.deploy (2)
 3. other/java/client/pom_debug.xml (2)
 4. other/java/client/src/main/java/seaweedfs/client/ChunkCache.java (1)
 5. other/java/client/src/main/java/seaweedfs/client/FileChunkManifest.java (3)
 6. other/java/client/src/main/java/seaweedfs/client/Gzip.java (8)
 7. other/java/client/src/main/java/seaweedfs/client/SeaweedRead.java (25)
 8. other/java/hdfs2/dependency-reduced-pom.xml (2)
 9. other/java/hdfs2/pom.xml (2)
10. other/java/hdfs3/dependency-reduced-pom.xml (2)
11. other/java/hdfs3/pom.xml (2)
12. weed/operation/chunked_file.go (2)
13. weed/server/volume_server_handlers_read.go (4)
14. weed/stats/disk.go (4)
15. weed/storage/needle/needle.go (3)
16. weed/storage/needle/needle_parse_upload.go (4)
17. weed/util/compression.go (2)
18. weed/util/http_util.go (56)

other/java/client/pom.xml

@@ -5,7 +5,7 @@
     <groupId>com.github.chrislusf</groupId>
     <artifactId>seaweedfs-client</artifactId>
-    <version>1.4.3</version>
+    <version>1.4.4</version>
     <parent>
         <groupId>org.sonatype.oss</groupId>

other/java/client/pom.xml.deploy

@@ -5,7 +5,7 @@
     <groupId>com.github.chrislusf</groupId>
     <artifactId>seaweedfs-client</artifactId>
-    <version>1.4.3</version>
+    <version>1.4.4</version>
     <parent>
         <groupId>org.sonatype.oss</groupId>

other/java/client/pom_debug.xml

@@ -5,7 +5,7 @@
     <groupId>com.github.chrislusf</groupId>
     <artifactId>seaweedfs-client</artifactId>
-    <version>1.4.3</version>
+    <version>1.4.4</version>
    <parent>
        <groupId>org.sonatype.oss</groupId>

other/java/client/src/main/java/seaweedfs/client/ChunkCache.java

@@ -15,6 +15,7 @@ public class ChunkCache {
         }
         this.cache = CacheBuilder.newBuilder()
                 .maximumSize(maxEntries)
+                .weakValues()
                 .expireAfterAccess(1, TimeUnit.HOURS)
                 .build();
     }

other/java/client/src/main/java/seaweedfs/client/FileChunkManifest.java

@@ -76,8 +76,11 @@ public class FileChunkManifest {
             LOG.debug("doFetchFullChunkData:{}", chunkView);
             chunkData = SeaweedRead.doFetchFullChunkData(chunkView, locations);
         }
-        LOG.debug("chunk {} size {}", chunkView.fileId, chunkData.length);
-        SeaweedRead.chunkCache.setChunk(chunkView.fileId, chunkData);
+        if (chunk.getIsChunkManifest()) {
+            // only cache manifest chunks
+            LOG.debug("chunk {} size {}", chunkView.fileId, chunkData.length);
+            SeaweedRead.chunkCache.setChunk(chunkView.fileId, chunkData);
+        }

         return chunkData;

other/java/client/src/main/java/seaweedfs/client/Gzip.java

@@ -18,14 +18,18 @@ public class Gzip {
         return compressed;
     }

-    public static byte[] decompress(byte[] compressed) throws IOException {
-        ByteArrayInputStream bis = new ByteArrayInputStream(compressed);
-        GZIPInputStream gis = new GZIPInputStream(bis);
-        return readAll(gis);
+    public static byte[] decompress(byte[] compressed) {
+        try {
+            ByteArrayInputStream bis = new ByteArrayInputStream(compressed);
+            GZIPInputStream gis = new GZIPInputStream(bis);
+            return readAll(gis);
+        } catch (Exception e) {
+            return compressed;
+        }
     }

     private static byte[] readAll(InputStream input) throws IOException {
-        try( ByteArrayOutputStream output = new ByteArrayOutputStream()){
+        try (ByteArrayOutputStream output = new ByteArrayOutputStream()) {
             byte[] buffer = new byte[4096];
             int n;
             while (-1 != (n = input.read(buffer))) {

other/java/client/src/main/java/seaweedfs/client/SeaweedRead.java

@@ -1,7 +1,10 @@
 package seaweedfs.client;

+import org.apache.http.Header;
+import org.apache.http.HeaderElement;
 import org.apache.http.HttpEntity;
 import org.apache.http.HttpHeaders;
+import org.apache.http.client.entity.GzipDecompressingEntity;
 import org.apache.http.client.methods.CloseableHttpResponse;
 import org.apache.http.client.methods.HttpGet;
 import org.apache.http.util.EntityUtils;

@@ -78,7 +81,7 @@ public class SeaweedRead {
         HttpGet request = new HttpGet(
                 String.format("http://%s/%s", locations.getLocations(0).getUrl(), chunkView.fileId));

-        request.setHeader(HttpHeaders.ACCEPT_ENCODING, "");
+        request.setHeader(HttpHeaders.ACCEPT_ENCODING, "gzip");

         byte[] data = null;

@@ -87,6 +90,18 @@ public class SeaweedRead {
         try {
             HttpEntity entity = response.getEntity();

+            Header contentEncodingHeader = entity.getContentEncoding();
+
+            if (contentEncodingHeader != null) {
+                HeaderElement[] encodings = contentEncodingHeader.getElements();
+                for (int i = 0; i < encodings.length; i++) {
+                    if (encodings[i].getName().equalsIgnoreCase("gzip")) {
+                        entity = new GzipDecompressingEntity(entity);
+                        break;
+                    }
+                }
+            }
+
             data = EntityUtils.toByteArray(entity);

             EntityUtils.consume(entity);

@@ -96,10 +111,6 @@ public class SeaweedRead {
             request.releaseConnection();
         }

-        if (chunkView.isCompressed) {
-            data = Gzip.decompress(data);
-        }
-
         if (chunkView.cipherKey != null && chunkView.cipherKey.length != 0) {
             try {
                 data = SeaweedCipher.decrypt(data, chunkView.cipherKey);

@@ -108,6 +119,10 @@ public class SeaweedRead {
             }
         }

+        if (chunkView.isCompressed) {
+            data = Gzip.decompress(data);
+        }
+
         LOG.debug("doFetchFullChunkData fid:{} chunkData.length:{}", chunkView.fileId, data.length);

         return data;
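Note on the last two hunks: the read path now decrypts before it decompresses, mirroring a write path that compresses first and encrypts last, so reads must undo the steps in reverse order. A self-contained Go sketch of that symmetry, using stdlib AES-GCM and gzip (illustrative only; these are not SeaweedCipher's actual APIs, and error handling is elided for brevity):

package main

import (
	"bytes"
	"compress/gzip"
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"fmt"
	"io/ioutil"
)

func gzipBytes(plain []byte) []byte {
	var buf bytes.Buffer
	w := gzip.NewWriter(&buf)
	w.Write(plain)
	w.Close()
	return buf.Bytes()
}

func gunzipBytes(compressed []byte) []byte {
	r, _ := gzip.NewReader(bytes.NewReader(compressed))
	plain, _ := ioutil.ReadAll(r)
	return plain
}

func main() {
	key := make([]byte, 32)
	rand.Read(key)
	block, _ := aes.NewCipher(key)
	gcm, _ := cipher.NewGCM(block)
	nonce := make([]byte, gcm.NonceSize())
	rand.Read(nonce)

	chunk := []byte("chunk payload")

	// write path: compress first, then encrypt
	stored := gcm.Seal(nil, nonce, gzipBytes(chunk), nil)

	// read path: decrypt first, then decompress -- the order this commit fixes
	opened, _ := gcm.Open(nil, nonce, stored, nil)
	fmt.Printf("%s\n", gunzipBytes(opened))
}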

other/java/hdfs2/dependency-reduced-pom.xml

@@ -127,7 +127,7 @@
     </snapshotRepository>
   </distributionManagement>
   <properties>
-    <seaweedfs.client.version>1.4.3</seaweedfs.client.version>
+    <seaweedfs.client.version>1.4.4</seaweedfs.client.version>
     <hadoop.version>2.9.2</hadoop.version>
   </properties>
 </project>

other/java/hdfs2/pom.xml

@@ -5,7 +5,7 @@
     <modelVersion>4.0.0</modelVersion>
     <properties>
-        <seaweedfs.client.version>1.4.3</seaweedfs.client.version>
+        <seaweedfs.client.version>1.4.4</seaweedfs.client.version>
         <hadoop.version>2.9.2</hadoop.version>
     </properties>

other/java/hdfs3/dependency-reduced-pom.xml

@@ -127,7 +127,7 @@
     </snapshotRepository>
   </distributionManagement>
   <properties>
-    <seaweedfs.client.version>1.4.3</seaweedfs.client.version>
+    <seaweedfs.client.version>1.4.4</seaweedfs.client.version>
     <hadoop.version>3.1.1</hadoop.version>
   </properties>
 </project>

other/java/hdfs3/pom.xml

@@ -5,7 +5,7 @@
     <modelVersion>4.0.0</modelVersion>
     <properties>
-        <seaweedfs.client.version>1.4.3</seaweedfs.client.version>
+        <seaweedfs.client.version>1.4.4</seaweedfs.client.version>
         <hadoop.version>3.1.1</hadoop.version>
     </properties>

weed/operation/chunked_file.go

@@ -57,7 +57,7 @@ func LoadChunkManifest(buffer []byte, isCompressed bool) (*ChunkManifest, error)
     if isCompressed {
         var err error
         if buffer, err = util.DecompressData(buffer); err != nil {
-            return nil, err
+            glog.V(0).Infof("fail to decompress chunk manifest: %v", err)
         }
     }
     cm := ChunkManifest{}

weed/server/volume_server_handlers_read.go

@@ -26,6 +26,8 @@ var fileNameEscaper = strings.NewReplacer("\\", "\\\\", "\"", "\\\"")

 func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request) {

+    // println(r.Method + " " + r.URL.Path)
+
     stats.VolumeServerRequestCounter.WithLabelValues("get").Inc()
     start := time.Now()
     defer func() { stats.VolumeServerRequestHistogram.WithLabelValues("get").Observe(time.Since(start).Seconds()) }()

@@ -142,7 +144,6 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
         }
     }

-    if ext != ".gz" && ext != ".zst" {
     if n.IsCompressed() {
         if _, _, _, shouldResize := shouldResizeImages(ext, r); shouldResize {
             if n.Data, err = util.DecompressData(n.Data); err != nil {

@@ -158,7 +159,6 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
             }
         }
     }
-    }

     rs := conditionallyResizeImages(bytes.NewReader(n.Data), ext, r)
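With the ext guard removed, compressed needles take the same branch regardless of file extension: the server decompresses only when it must (e.g. for image resizing) and can otherwise hand stored-gzipped bytes straight to clients that advertise gzip support. A simplified standalone sketch of that pass-through idea (a hypothetical handler, not SeaweedFS's actual one):

package main

import (
	"bytes"
	"compress/gzip"
	"io/ioutil"
	"net/http"
	"strings"
)

// serveMaybeGzipped hands stored-gzipped bytes through untouched when the
// client accepts gzip, and decompresses them only when it does not.
func serveMaybeGzipped(w http.ResponseWriter, r *http.Request, data []byte, isGzipped bool) {
	if isGzipped {
		if strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
			w.Header().Set("Content-Encoding", "gzip")
		} else if gz, err := gzip.NewReader(bytes.NewReader(data)); err == nil {
			// client cannot handle gzip: decompress before writing
			if plain, err := ioutil.ReadAll(gz); err == nil {
				data = plain
			}
		}
	}
	w.Write(data)
}

func main() {
	http.HandleFunc("/blob", func(w http.ResponseWriter, r *http.Request) {
		serveMaybeGzipped(w, r, []byte("raw payload"), false)
	})
	http.ListenAndServe(":8080", nil)
}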

weed/stats/disk.go

@@ -8,6 +8,8 @@ import (

 func NewDiskStatus(path string) (disk *volume_server_pb.DiskStatus) {
     disk = &volume_server_pb.DiskStatus{Dir: path}
     fillInDiskStatus(disk)
+    glog.V(2).Infof("read disk size: %v", disk)
+    if disk.PercentUsed > 95 {
+        glog.V(0).Infof("disk status: %v", disk)
+    }
     return
 }

weed/storage/needle/needle.go

@@ -44,7 +44,7 @@ type Needle struct {
 }

 func (n *Needle) String() (str string) {
-    str = fmt.Sprintf("%s Size:%d, DataSize:%d, Name:%s, Mime:%s", formatNeedleIdCookie(n.Id, n.Cookie), n.Size, n.DataSize, n.Name, n.Mime)
+    str = fmt.Sprintf("%s Size:%d, DataSize:%d, Name:%s, Mime:%s Compressed:%v", formatNeedleIdCookie(n.Id, n.Cookie), n.Size, n.DataSize, n.Name, n.Mime, n.IsCompressed())
     return
 }

@@ -81,6 +81,7 @@ func CreateNeedleFromRequest(r *http.Request, fixJpgOrientation bool, sizeLimit
         }
     }
     if pu.IsGzipped {
+        // println(r.URL.Path, "is set to compressed", pu.FileName, pu.IsGzipped, "dataSize", pu.OriginalDataSize)
         n.SetIsCompressed()
     }
     if n.LastModified == 0 {

weed/storage/needle/needle_parse_upload.go

@@ -54,7 +54,7 @@ func ParseUpload(r *http.Request, sizeLimit int64) (pu *ParsedUpload, e error) {
     pu.OriginalDataSize = len(pu.Data)
     pu.UncompressedData = pu.Data
-    // println("received data", len(pu.Data), "isGzipped", pu.IsCompressed, "mime", pu.MimeType, "name", pu.FileName)
+    // println("received data", len(pu.Data), "isGzipped", pu.IsGzipped, "mime", pu.MimeType, "name", pu.FileName)
     if pu.IsGzipped {
         if unzipped, e := util.DecompressData(pu.Data); e == nil {
             pu.OriginalDataSize = len(unzipped)

@@ -72,7 +72,7 @@ func ParseUpload(r *http.Request, sizeLimit int64) (pu *ParsedUpload, e error) {
             mimeType = ""
         }
         if shouldBeCompressed, iAmSure := util.IsCompressableFileType(ext, mimeType); mimeType == "" && !iAmSure || shouldBeCompressed && iAmSure {
-            // println("ext", ext, "iAmSure", iAmSure, "shouldGzip", shouldGzip, "mimeType", pu.MimeType)
+            // println("ext", ext, "iAmSure", iAmSure, "shouldBeCompressed", shouldBeCompressed, "mimeType", pu.MimeType)
             if compressedData, err := util.GzipData(pu.Data); err == nil {
                 if len(compressedData)*10 < len(pu.Data)*9 {
                     pu.Data = compressedData
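For context, the size check in the final lines of this hunk stores the gzipped form only when it saves at least 10%: len(compressedData)*10 < len(pu.Data)*9 is equivalent to requiring the compressed size to be under 90% of the original. A self-contained sketch of that heuristic:

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
)

// maybeCompress returns the gzipped form only when it is at least 10% smaller
// than the original, mirroring ParseUpload's size check.
func maybeCompress(original []byte) ([]byte, bool) {
	var buf bytes.Buffer
	w := gzip.NewWriter(&buf)
	w.Write(original)
	w.Close()
	if buf.Len()*10 < len(original)*9 {
		return buf.Bytes(), true
	}
	return original, false
}

func main() {
	text := bytes.Repeat([]byte("highly compressible text "), 100)
	if compressed, ok := maybeCompress(text); ok {
		fmt.Printf("stored gzipped: %d -> %d bytes\n", len(text), len(compressed))
	}
}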

weed/util/compression.go

@@ -39,7 +39,7 @@ func DecompressData(input []byte) ([]byte, error) {
     if IsZstdContent(input) {
         return unzstdData(input)
     }
-    return nil, fmt.Errorf("unsupported compression")
+    return input, fmt.Errorf("unsupported compression")
 }

 func ungzipData(input []byte) ([]byte, error) {
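With this change DecompressData returns the raw input alongside the error instead of nil, so callers (like LoadChunkManifest above) can log the failure and fall back to the undecoded bytes. A minimal sketch of that contract, reimplemented with the stdlib for illustration (not the actual weed/util code):

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io/ioutil"
)

// decompressData mimics the new contract: on unsupported input, return the
// bytes unchanged together with a non-nil error, letting callers degrade
// gracefully instead of aborting.
func decompressData(input []byte) ([]byte, error) {
	if len(input) >= 2 && input[0] == 0x1f && input[1] == 0x8b { // gzip magic number
		r, err := gzip.NewReader(bytes.NewReader(input))
		if err != nil {
			return input, err
		}
		defer r.Close()
		return ioutil.ReadAll(r)
	}
	return input, fmt.Errorf("unsupported compression")
}

func main() {
	data, err := decompressData([]byte("not compressed"))
	if err != nil {
		fmt.Println("fallback to raw bytes:", err) // non-fatal
	}
	fmt.Printf("%s\n", data)
}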

weed/util/http_util.go

@@ -68,14 +68,28 @@ func Post(url string, values url.Values) ([]byte, error) {
 // github.com/chrislusf/seaweedfs/unmaintained/repeated_vacuum/repeated_vacuum.go
 // may need increasing http.Client.Timeout
 func Get(url string) ([]byte, error) {
-    r, err := client.Get(url)
+
+    request, err := http.NewRequest("GET", url, nil)
+    request.Header.Add("Accept-Encoding", "gzip")
+
+    response, err := client.Do(request)
     if err != nil {
         return nil, err
     }
-    defer r.Body.Close()
-    b, err := ioutil.ReadAll(r.Body)
-    if r.StatusCode >= 400 {
-        return nil, fmt.Errorf("%s: %s", url, r.Status)
+    defer response.Body.Close()
+
+    var reader io.ReadCloser
+    switch response.Header.Get("Content-Encoding") {
+    case "gzip":
+        reader, err = gzip.NewReader(response.Body)
+        defer reader.Close()
+    default:
+        reader = response.Body
+    }
+
+    b, err := ioutil.ReadAll(reader)
+    if response.StatusCode >= 400 {
+        return nil, fmt.Errorf("%s: %s", url, response.Status)
     }
     if err != nil {
         return nil, err
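One detail worth calling out in the new Get: once a request sets Accept-Encoding explicitly, Go's net/http no longer decompresses responses transparently, which is why the Content-Encoding switch and gzip.NewReader are needed. A standalone version of the pattern (the URL in main is a hypothetical volume-server address, for illustration only):

package main

import (
	"compress/gzip"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
)

// gzipAwareGet requests gzip explicitly, as the new util.Get does. Setting
// Accept-Encoding by hand disables net/http's transparent decompression,
// so the response must be gunzipped manually when the server honored it.
func gzipAwareGet(url string) ([]byte, error) {
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Add("Accept-Encoding", "gzip")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	if resp.StatusCode >= 400 {
		return nil, fmt.Errorf("%s: %s", url, resp.Status)
	}

	var reader io.Reader = resp.Body
	if resp.Header.Get("Content-Encoding") == "gzip" {
		gz, err := gzip.NewReader(resp.Body)
		if err != nil {
			return nil, err
		}
		defer gz.Close()
		reader = gz
	}
	return ioutil.ReadAll(reader)
}

func main() {
	if b, err := gzipAwareGet("http://localhost:8080/3,01637037d6"); err == nil {
		fmt.Println(len(b), "bytes")
	}
}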
@@ -269,7 +283,9 @@ func ReadUrlAsStream(fileUrl string, cipherKey []byte, isContentGzipped bool, is
         return err
     }

-    if !isFullChunk {
+    if isFullChunk {
+        req.Header.Add("Accept-Encoding", "gzip")
+    } else {
         req.Header.Add("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+int64(size)-1))
     }

@@ -282,13 +298,23 @@ func ReadUrlAsStream(fileUrl string, cipherKey []byte, isContentGzipped bool, is
         return fmt.Errorf("%s: %s", fileUrl, r.Status)
     }

+    var reader io.ReadCloser
+    contentEncoding := r.Header.Get("Content-Encoding")
+    switch contentEncoding {
+    case "gzip":
+        reader, err = gzip.NewReader(r.Body)
+        defer reader.Close()
+    default:
+        reader = r.Body
+    }
+
     var (
         m int
     )
     buf := make([]byte, 64*1024)

     for {
-        m, err = r.Body.Read(buf)
+        m, err = reader.Read(buf)
         fn(buf[:m])
         if err == io.EOF {
             return nil

@@ -312,7 +338,7 @@ func readEncryptedUrl(fileUrl string, cipherKey []byte, isContentCompressed bool
     if isContentCompressed {
         decryptedData, err = DecompressData(decryptedData)
         if err != nil {
-            return fmt.Errorf("unzip decrypt %s: %v", fileUrl, err)
+            glog.V(0).Infof("unzip decrypt %s: %v", fileUrl, err)
         }
     }
     if len(decryptedData) < int(offset)+size {

@@ -334,6 +360,8 @@ func ReadUrlAsReaderCloser(fileUrl string, rangeHeader string) (io.ReadCloser, e
     }
     if rangeHeader != "" {
         req.Header.Add("Range", rangeHeader)
+    } else {
+        req.Header.Add("Accept-Encoding", "gzip")
     }

     r, err := client.Do(req)

@@ -344,7 +372,17 @@ func ReadUrlAsReaderCloser(fileUrl string, rangeHeader string) (io.ReadCloser, e
         return nil, fmt.Errorf("%s: %s", fileUrl, r.Status)
     }

-    return r.Body, nil
+    var reader io.ReadCloser
+    contentEncoding := r.Header.Get("Content-Encoding")
+    switch contentEncoding {
+    case "gzip":
+        reader, err = gzip.NewReader(r.Body)
+        defer reader.Close()
+    default:
+        reader = r.Body
+    }
+
+    return reader, nil
 }

 func CloseResponse(resp *http.Response) {
