
HCFS: 1.5.2

Chris Lu, 4 years ago
commit 596d476e3d (pull/1566/head)
11 changed files:

  1. other/java/client/pom.xml (2 lines changed)
  2. other/java/client/pom.xml.deploy (2 lines changed)
  3. other/java/client/pom_debug.xml (2 lines changed)
  4. other/java/client/src/main/java/seaweedfs/client/FileChunkManifest.java (8 lines changed)
  5. other/java/client/src/main/java/seaweedfs/client/SeaweedWrite.java (11 lines changed)
  6. other/java/hdfs2/dependency-reduced-pom.xml (2 lines changed)
  7. other/java/hdfs2/pom.xml (2 lines changed)
  8. other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedOutputStream.java (2 lines changed)
  9. other/java/hdfs3/dependency-reduced-pom.xml (2 lines changed)
  10. other/java/hdfs3/pom.xml (2 lines changed)
  11. other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedOutputStream.java (3 lines changed)

other/java/client/pom.xml (2 lines changed)

@@ -5,7 +5,7 @@
 <groupId>com.github.chrislusf</groupId>
 <artifactId>seaweedfs-client</artifactId>
-<version>1.5.1</version>
+<version>1.5.2</version>
 <parent>
 <groupId>org.sonatype.oss</groupId>

other/java/client/pom.xml.deploy (2 lines changed)

@@ -5,7 +5,7 @@
 <groupId>com.github.chrislusf</groupId>
 <artifactId>seaweedfs-client</artifactId>
-<version>1.5.1</version>
+<version>1.5.2</version>
 <parent>
 <groupId>org.sonatype.oss</groupId>

other/java/client/pom_debug.xml (2 lines changed)

@@ -5,7 +5,7 @@
 <groupId>com.github.chrislusf</groupId>
 <artifactId>seaweedfs-client</artifactId>
-<version>1.5.1</version>
+<version>1.5.2</version>
 <parent>
 <groupId>org.sonatype.oss</groupId>

other/java/client/src/main/java/seaweedfs/client/FileChunkManifest.java (8 lines changed)

@@ -86,7 +86,7 @@ public class FileChunkManifest {
 }
 public static List<FilerProto.FileChunk> maybeManifestize(
-final FilerGrpcClient filerGrpcClient, List<FilerProto.FileChunk> inputChunks) throws IOException {
+final FilerGrpcClient filerGrpcClient, List<FilerProto.FileChunk> inputChunks, String parentDirectory) throws IOException {
 // the return variable
 List<FilerProto.FileChunk> chunks = new ArrayList<>();
@@ -101,7 +101,7 @@ public class FileChunkManifest {
 int remaining = dataChunks.size();
 for (int i = 0; i + mergeFactor < dataChunks.size(); i += mergeFactor) {
-FilerProto.FileChunk chunk = mergeIntoManifest(filerGrpcClient, dataChunks.subList(i, i + mergeFactor));
+FilerProto.FileChunk chunk = mergeIntoManifest(filerGrpcClient, dataChunks.subList(i, i + mergeFactor), parentDirectory);
 chunks.add(chunk);
 remaining -= mergeFactor;
 }
@@ -113,7 +113,7 @@ public class FileChunkManifest {
 return chunks;
 }
-private static FilerProto.FileChunk mergeIntoManifest(final FilerGrpcClient filerGrpcClient, List<FilerProto.FileChunk> dataChunks) throws IOException {
+private static FilerProto.FileChunk mergeIntoManifest(final FilerGrpcClient filerGrpcClient, List<FilerProto.FileChunk> dataChunks, String parentDirectory) throws IOException {
 // create and serialize the manifest
 dataChunks = FilerClient.beforeEntrySerialization(dataChunks);
 FilerProto.FileChunkManifest.Builder m = FilerProto.FileChunkManifest.newBuilder().addAllChunks(dataChunks);
@@ -130,7 +130,7 @@ public class FileChunkManifest {
 filerGrpcClient.getReplication(),
 filerGrpcClient,
 minOffset,
-data, 0, data.length);
+data, 0, data.length, parentDirectory);
 manifestChunk.setIsChunkManifest(true);
 manifestChunk.setSize(maxOffset - minOffset);
 return manifestChunk.build();
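
Context note, not part of the diff: the hunk at line 130 only shows the argument list, so the enclosing call is reconstructed below as a hedged sketch. In mergeIntoManifest, the serialized manifest bytes are written as a chunk of their own via SeaweedWrite.writeChunk, which now also receives parentDirectory so the manifest chunk's volume assignment knows which directory is being written to.

    // Hedged reconstruction of the enclosing call (names taken from the diff, not the full source):
    FilerProto.FileChunk.Builder manifestChunk = SeaweedWrite.writeChunk(
            filerGrpcClient.getReplication(),
            filerGrpcClient,
            minOffset,
            data, 0, data.length,
            parentDirectory);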

other/java/client/src/main/java/seaweedfs/client/SeaweedWrite.java (11 lines changed)

@@ -26,9 +26,10 @@ public class SeaweedWrite {
 final FilerGrpcClient filerGrpcClient,
 final long offset,
 final byte[] bytes,
-final long bytesOffset, final long bytesLength) throws IOException {
+final long bytesOffset, final long bytesLength,
+final String parentPath) throws IOException {
 FilerProto.FileChunk.Builder chunkBuilder = writeChunk(
-replication, filerGrpcClient, offset, bytes, bytesOffset, bytesLength);
+replication, filerGrpcClient, offset, bytes, bytesOffset, bytesLength, parentPath);
 synchronized (entry) {
 entry.addChunks(chunkBuilder);
 }
@@ -39,13 +40,15 @@
 final long offset,
 final byte[] bytes,
 final long bytesOffset,
-final long bytesLength) throws IOException {
+final long bytesLength,
+final String parentPath) throws IOException {
 FilerProto.AssignVolumeResponse response = filerGrpcClient.getBlockingStub().assignVolume(
 FilerProto.AssignVolumeRequest.newBuilder()
 .setCollection(filerGrpcClient.getCollection())
 .setReplication(replication == null ? filerGrpcClient.getReplication() : replication)
 .setDataCenter("")
 .setTtlSec(0)
+.setParentPath(parentPath)
 .build());
 String fileId = response.getFileId();
 String url = response.getUrl();
@@ -77,7 +80,7 @@
 final FilerProto.Entry.Builder entry) throws IOException {
 synchronized (entry) {
-List<FilerProto.FileChunk> chunks = FileChunkManifest.maybeManifestize(filerGrpcClient, entry.getChunksList());
+List<FilerProto.FileChunk> chunks = FileChunkManifest.maybeManifestize(filerGrpcClient, entry.getChunksList(), parentDirectory);
 entry.clearChunks();
 entry.addAllChunks(chunks);
 filerGrpcClient.getBlockingStub().createEntry(
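
Taken together, the SeaweedWrite change threads the parent directory of the file being written all the way into the filer's AssignVolumeRequest via the new setParentPath call, so volume assignment can take the path into account. Below is a hedged usage sketch, not part of the commit: entry, replication and filerGrpcClient are assumed to be set up elsewhere, and the path value is hypothetical.

    byte[] data = "hello".getBytes(java.nio.charset.StandardCharsets.UTF_8);
    SeaweedWrite.writeData(
            entry,                  // FilerProto.Entry.Builder being assembled
            replication,            // may be null; falls back to filerGrpcClient.getReplication()
            filerGrpcClient,
            0L,                     // offset of this chunk within the file
            data, 0, data.length,   // bytes, bytesOffset, bytesLength
            "/buckets/example");    // parentPath: directory containing the file (hypothetical value)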

other/java/hdfs2/dependency-reduced-pom.xml (2 lines changed)

@@ -301,7 +301,7 @@
 </snapshotRepository>
 </distributionManagement>
 <properties>
-<seaweedfs.client.version>1.5.1</seaweedfs.client.version>
+<seaweedfs.client.version>1.5.2</seaweedfs.client.version>
 <hadoop.version>2.9.2</hadoop.version>
 </properties>
 </project>

other/java/hdfs2/pom.xml (2 lines changed)

@@ -5,7 +5,7 @@
 <modelVersion>4.0.0</modelVersion>
 <properties>
-<seaweedfs.client.version>1.5.1</seaweedfs.client.version>
+<seaweedfs.client.version>1.5.2</seaweedfs.client.version>
 <hadoop.version>2.9.2</hadoop.version>
 </properties>

other/java/hdfs2/src/main/java/seaweed/hdfs/SeaweedOutputStream.java (2 lines changed)

@@ -185,7 +185,7 @@ public class SeaweedOutputStream extends OutputStream {
 }
 final Future<Void> job = completionService.submit(() -> {
 // System.out.println(path + " is going to save [" + (writePosition) + "," + ((writePosition) + bytesLength) + ")");
-SeaweedWrite.writeData(entry, replication, filerGrpcClient, writePosition, bufferToWrite.array(), bufferToWrite.position(), bufferToWrite.limit());
+SeaweedWrite.writeData(entry, replication, filerGrpcClient, writePosition, bufferToWrite.array(), bufferToWrite.position(), bufferToWrite.limit(), getParentDirectory(path));
 // System.out.println(path + " saved [" + (writePosition) + "," + ((writePosition) + bytesLength) + ")");
 ByteBufferPool.release(bufferToWrite);
 return null;

other/java/hdfs3/dependency-reduced-pom.xml (2 lines changed)

@@ -309,7 +309,7 @@
 </snapshotRepository>
 </distributionManagement>
 <properties>
-<seaweedfs.client.version>1.5.1</seaweedfs.client.version>
+<seaweedfs.client.version>1.5.2</seaweedfs.client.version>
 <hadoop.version>3.1.1</hadoop.version>
 </properties>
 </project>

other/java/hdfs3/pom.xml (2 lines changed)

@@ -5,7 +5,7 @@
 <modelVersion>4.0.0</modelVersion>
 <properties>
-<seaweedfs.client.version>1.5.1</seaweedfs.client.version>
+<seaweedfs.client.version>1.5.2</seaweedfs.client.version>
 <hadoop.version>3.1.1</hadoop.version>
 </properties>

other/java/hdfs3/src/main/java/seaweed/hdfs/SeaweedOutputStream.java (3 lines changed)

@@ -18,7 +18,6 @@ import java.io.IOException;
 import java.io.InterruptedIOException;
 import java.io.OutputStream;
 import java.nio.ByteBuffer;
-import java.util.Arrays;
 import java.util.Locale;
 import java.util.concurrent.*;
@@ -232,7 +231,7 @@ public class SeaweedOutputStream extends OutputStream implements Syncable, Strea
 }
 final Future<Void> job = completionService.submit(() -> {
 // System.out.println(path + " is going to save [" + (writePosition) + "," + ((writePosition) + bytesLength) + ")");
-SeaweedWrite.writeData(entry, replication, filerGrpcClient, writePosition, bufferToWrite.array(), bufferToWrite.position(), bufferToWrite.limit());
+SeaweedWrite.writeData(entry, replication, filerGrpcClient, writePosition, bufferToWrite.array(), bufferToWrite.position(), bufferToWrite.limit(), getParentDirectory(path));
 // System.out.println(path + " saved [" + (writePosition) + "," + ((writePosition) + bytesLength) + ")");
 ByteBufferPool.release(bufferToWrite);
 return null;
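
Both the hdfs2 and hdfs3 call sites above now pass getParentDirectory(path) as the new last argument to SeaweedWrite.writeData. That helper is not part of this diff; below is a minimal sketch of an implementation consistent with these call sites (an assumption, not the committed code).

    // Hypothetical helper: returns the parent of a Hadoop Path as a plain string.
    private static String getParentDirectory(org.apache.hadoop.fs.Path path) {
        return path.isRoot() ? "/" : path.getParent().toUri().getPath();
    }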
