Browse Source

Hadoop: 1.3.3

improve memory efficiency
pull/1390/head
Chris Lu 5 years ago
parent
commit
bc3be0bb37
  1. 2
      other/java/client/pom.xml
  2. 2
      other/java/client/pom.xml.deploy
  3. 2
      other/java/client/pom_debug.xml
  4. 2
      other/java/hdfs2/dependency-reduced-pom.xml
  5. 2
      other/java/hdfs2/pom.xml
  6. 27
      other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedOutputStream.java
  7. 2
      other/java/hdfs3/dependency-reduced-pom.xml
  8. 2
      other/java/hdfs3/pom.xml
  9. 36
      other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedOutputStream.java

2
other/java/client/pom.xml

@@ -5,7 +5,7 @@
<groupId>com.github.chrislusf</groupId>
<artifactId>seaweedfs-client</artifactId>
<version>1.3.2</version>
<version>1.3.3</version>
<parent>
<groupId>org.sonatype.oss</groupId>

2
other/java/client/pom.xml.deploy

@@ -5,7 +5,7 @@
<groupId>com.github.chrislusf</groupId>
<artifactId>seaweedfs-client</artifactId>
<version>1.3.2</version>
<version>1.3.3</version>
<parent>
<groupId>org.sonatype.oss</groupId>

2
other/java/client/pom_debug.xml

@@ -5,7 +5,7 @@
<groupId>com.github.chrislusf</groupId>
<artifactId>seaweedfs-client</artifactId>
<version>1.3.2</version>
<version>1.3.3</version>
<parent>
<groupId>org.sonatype.oss</groupId>

2
other/java/hdfs2/dependency-reduced-pom.xml

@@ -127,7 +127,7 @@
</snapshotRepository>
</distributionManagement>
<properties>
<seaweedfs.client.version>1.3.2</seaweedfs.client.version>
<seaweedfs.client.version>1.3.3</seaweedfs.client.version>
<hadoop.version>2.9.2</hadoop.version>
</properties>
</project>

2
other/java/hdfs2/pom.xml

@@ -5,7 +5,7 @@
<modelVersion>4.0.0</modelVersion>
<properties>
<seaweedfs.client.version>1.3.2</seaweedfs.client.version>
<seaweedfs.client.version>1.3.3</seaweedfs.client.version>
<hadoop.version>2.9.2</hadoop.version>
</properties>

27
other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedOutputStream.java

@@ -14,6 +14,7 @@ import seaweedfs.client.SeaweedWrite;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.io.OutputStream;
import java.util.Arrays;
import java.util.concurrent.*;
import static seaweed.hdfs.SeaweedFileSystemStore.getParentDirectory;
@@ -28,16 +29,16 @@ public class SeaweedOutputStream extends OutputStream {
private final int maxConcurrentRequestCount;
private final ThreadPoolExecutor threadExecutor;
private final ExecutorCompletionService<Void> completionService;
private FilerProto.Entry.Builder entry;
private final FilerProto.Entry.Builder entry;
private final boolean supportFlush = true;
private final ConcurrentLinkedDeque<WriteOperation> writeOperations;
private long position;
private boolean closed;
private boolean supportFlush = true;
private volatile IOException lastError;
private long lastFlushOffset;
private long lastTotalAppendOffset = 0;
private byte[] buffer;
private int bufferIndex;
private ConcurrentLinkedDeque<WriteOperation> writeOperations;
private String replication = "000";
public SeaweedOutputStream(FilerGrpcClient filerGrpcClient, final Path path, FilerProto.Entry.Builder entry,
@@ -50,7 +51,7 @@ public class SeaweedOutputStream extends OutputStream {
this.lastError = null;
this.lastFlushOffset = 0;
this.bufferSize = bufferSize;
this.buffer = new byte[bufferSize];
// this.buffer = new byte[bufferSize];
this.bufferIndex = 0;
this.writeOperations = new ConcurrentLinkedDeque<>();
@@ -98,6 +99,22 @@ public class SeaweedOutputStream extends OutputStream {
int numberOfBytesToWrite = length;
while (numberOfBytesToWrite > 0) {
if (buffer == null) {
buffer = new byte[32];
}
// ensureCapacity
if (numberOfBytesToWrite > buffer.length - bufferIndex) {
int capacity = buffer.length;
while(capacity-bufferIndex<numberOfBytesToWrite){
capacity = capacity << 1;
}
if (capacity < 0) {
throw new OutOfMemoryError();
}
buffer = Arrays.copyOf(buffer, capacity);
}
if (writableBytes <= numberOfBytesToWrite) {
System.arraycopy(data, currentOffset, buffer, bufferIndex, writableBytes);
bufferIndex += writableBytes;
@@ -165,7 +182,7 @@ public class SeaweedOutputStream extends OutputStream {
final byte[] bytes = buffer;
final int bytesLength = bufferIndex;
buffer = new byte[bufferSize];
buffer = null; // new byte[bufferSize];
bufferIndex = 0;
final long offset = position;
position += bytesLength;

2
other/java/hdfs3/dependency-reduced-pom.xml

@@ -127,7 +127,7 @@
</snapshotRepository>
</distributionManagement>
<properties>
<seaweedfs.client.version>1.3.2</seaweedfs.client.version>
<seaweedfs.client.version>1.3.3</seaweedfs.client.version>
<hadoop.version>3.1.1</hadoop.version>
</properties>
</project>

2
other/java/hdfs3/pom.xml

@@ -5,7 +5,7 @@
<modelVersion>4.0.0</modelVersion>
<properties>
<seaweedfs.client.version>1.3.2</seaweedfs.client.version>
<seaweedfs.client.version>1.3.3</seaweedfs.client.version>
<hadoop.version>3.1.1</hadoop.version>
</properties>

36
other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedOutputStream.java

@@ -16,14 +16,8 @@ import seaweedfs.client.SeaweedWrite;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.io.OutputStream;
import java.util.Locale;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentLinkedDeque;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.Arrays;
import java.util.concurrent.*;
import static seaweed.hdfs.SeaweedFileSystemStore.getParentDirectory;
@@ -37,16 +31,16 @@ public class SeaweedOutputStream extends OutputStream implements Syncable, Strea
private final int maxConcurrentRequestCount;
private final ThreadPoolExecutor threadExecutor;
private final ExecutorCompletionService<Void> completionService;
private FilerProto.Entry.Builder entry;
private final FilerProto.Entry.Builder entry;
private final boolean supportFlush = true;
private final ConcurrentLinkedDeque<WriteOperation> writeOperations;
private long position;
private boolean closed;
private boolean supportFlush = true;
private volatile IOException lastError;
private long lastFlushOffset;
private long lastTotalAppendOffset = 0;
private byte[] buffer;
private int bufferIndex;
private ConcurrentLinkedDeque<WriteOperation> writeOperations;
private String replication = "000";
public SeaweedOutputStream(FilerGrpcClient filerGrpcClient, final Path path, FilerProto.Entry.Builder entry,
@@ -59,7 +53,7 @@ public class SeaweedOutputStream extends OutputStream implements Syncable, Strea
this.lastError = null;
this.lastFlushOffset = 0;
this.bufferSize = bufferSize;
this.buffer = new byte[bufferSize];
// this.buffer = new byte[bufferSize];
this.bufferIndex = 0;
this.writeOperations = new ConcurrentLinkedDeque<>();
@@ -107,6 +101,22 @@ public class SeaweedOutputStream extends OutputStream implements Syncable, Strea
int numberOfBytesToWrite = length;
while (numberOfBytesToWrite > 0) {
if (buffer == null) {
buffer = new byte[32];
}
// ensureCapacity
if (numberOfBytesToWrite > buffer.length - bufferIndex) {
int capacity = buffer.length;
while(capacity-bufferIndex<numberOfBytesToWrite){
capacity = capacity << 1;
}
if (capacity < 0) {
throw new OutOfMemoryError();
}
buffer = Arrays.copyOf(buffer, capacity);
}
if (writableBytes <= numberOfBytesToWrite) {
System.arraycopy(data, currentOffset, buffer, bufferIndex, writableBytes);
bufferIndex += writableBytes;
@@ -217,7 +227,7 @@ public class SeaweedOutputStream extends OutputStream implements Syncable, Strea
final byte[] bytes = buffer;
final int bytesLength = bufferIndex;
buffer = new byte[bufferSize];
buffer = null; // new byte[bufferSize];
bufferIndex = 0;
final long offset = position;
position += bytesLength;

Loading…
Cancel
Save